code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
---|---|---|---|---|---|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from __future__ import absolute_import
from rapidsms.message import Message, EmailMessage
from rapidsms.connection import Connection
from . import backend
import imaplib
import time
import smtplib
import re
import Queue
from datetime import datetime
from email import message_from_string
from email.mime.text import MIMEText
class Backend(backend.Backend):
'''Backend to interact with email. Link this to an smtp and imap account.
The account will be polled and every unread message will be sent (the
body) to the router as if it was an SMS. As soon as messages are found
they are marked read.
This backend creates EmailMessage messages, which are an extension of
messages that include a subject and mime_type. Currently we do not
do anything smart with attachments.
'''
_title = "Email"
def configure(self, smtp_host="localhost", smtp_port=25,
imap_host="localhost", imap_port=143,
username="[email protected]",
password="secret",
use_tls=True, poll_interval=60):
# the default information will not work, users need to configure this
# in their settings
self.smtp_host = smtp_host
self.smtp_port = int(smtp_port)
self.imap_host = imap_host
self.imap_port = int(imap_port)
self.username = username
self.password = password
self.use_tls = use_tls
self.poll_interval = int(poll_interval)
def _send(self, email_message):
# Create a text/plain message for now
# TODO: support html formatted messages?
msg = MIMEText(email_message.text)
msg['Subject'] = getattr(email_message, "subject", None)
msg['From'] = self.username
msg['To'] = email_message.peer
s = smtplib.SMTP(host=self.smtp_host, port=self.smtp_port)
s.ehlo()
if self.use_tls:
s.starttls()
s.login(self.username, self.password)
s.sendmail(self.username, [email_message.peer], msg.as_string())
s.quit()
def start(self):
backend.Backend.start(self)
def stop(self):
backend.Backend.stop(self)
self.info("Shutting down...")
def run(self):
while self.running:
# check for messages, if we find them, ship them off to the
# router and go back to sleep
messages = self._get_new_messages()
if messages:
for message in messages:
self.router.send(message)
# also process all outbound messages
while True:
try:
self._send(self._queue.get_nowait())
except Queue.Empty:
# break out of while
break
time.sleep(self.poll_interval)
def _get_new_messages(self):
imap_connection = imaplib.IMAP4_SSL(self.imap_host, self.imap_port)
imap_connection.login(self.username, self.password)
imap_connection.select()
all_msgs = []
# this assumes any unread message is a new message
typ, data = imap_connection.search(None, 'UNSEEN')
for num in data[0].split():
typ, data = imap_connection.fetch(num, '(RFC822)')
# get a rapidsms message from the data
email_message = self.message_from_imap(data[0][1])
all_msgs.append(email_message)
# mark it read
imap_connection.store(num, "+FLAGS", "\\Seen")
imap_connection.close()
imap_connection.logout()
return all_msgs
def message_from_imap(self, imap_mail):
"""From an IMAP message object, get a rapidsms message object"""
parsed = message_from_string(imap_mail)
from_user = parsed["From"]
# if the from format was something like:
# "Bob User" <[email protected]>
# just pull out the relevant address part from within the carats.
# Note that we don't currently do anything smart parsing email
# addresses to make sure they are valid, we either just take
# what we get, or take what we get between <>.
match = re.match(r"^.*<\s*(\S+)\s*>", from_user)
if match:
new_addr = match.groups()[0]
self.debug("converting %s to %s" % (from_user, new_addr))
from_user = new_addr
subject = parsed["Subject"]
date_string = parsed["Date"]
# TODO: until we figure out how to generically parse dates, just use
# the current time. This appears to be the standard date format, but
# currently timezone information is optional.
# date = datetime.strptime(truncated_date, "%a, %d %b %Y %H:%M:%S")
date = datetime.now()
connection = Connection(self, from_user)
message_body = get_message_body(parsed)
if not message_body:
self.error("Got a poorly formed email. Couldn't find any part with content-type text")
# TODO: not sure how to handle this. For now still route it with empty body
return EmailMessage(connection=connection, text="",
date=date, subject=subject)
return EmailMessage(connection=connection, text=message_body.get_payload(),
date=date, subject=subject, mime_type=message_body.get_content_type())
def is_plaintext(email_message):
"""Whether a message is plaintext"""
return re.match(r"^text/plain", email_message.get_content_type(), re.IGNORECASE)
def is_text(email_message):
"""Whether a message is text"""
return re.match(r"^text/.*", email_message.get_content_type(), re.IGNORECASE)
def get_message_body(email_message):
"""Walk through the message parts, taking the first text/plain.
if no text/plain (is this allowed?) will return the first
text/html"""
candidate = None
if email_message.is_multipart():
for message_part in email_message.walk():
if is_plaintext(message_part):
return message_part
elif is_text(message_part) and candidate is not None:
candidate = message_part
else:
# we don't really have a choice here
return email_message
return candidate
| rapidsms/rapidsms-legacy | lib/rapidsms/backends/email.py | Python | bsd-3-clause | 6,507 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for Python proto2 tests.
This is intentionally modeled on C++ code in
//net/proto2/internal/test_util.*.
"""
__author__ = '[email protected] (Will Robinson)'
import os.path
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
def SetAllFields(message):
"""Sets every field in the message to a unique value.
Args:
message: A unittest_pb2.TestAllTypes instance.
"""
#
# Optional fields.
#
message.optional_int32 = 101
message.optional_int64 = 102
message.optional_uint32 = 103
message.optional_uint64 = 104
message.optional_sint32 = 105
message.optional_sint64 = 106
message.optional_fixed32 = 107
message.optional_fixed64 = 108
message.optional_sfixed32 = 109
message.optional_sfixed64 = 110
message.optional_float = 111
message.optional_double = 112
message.optional_bool = True
# TODO(robinson): Firmly spec out and test how
# protos interact with unicode. One specific example:
# what happens if we change the literal below to
# u'115'? What *should* happen? Still some discussion
# to finish with Kenton about bytes vs. strings
# and forcing everything to be utf8. :-/
message.optional_string = '115'
message.optional_bytes = '116'
message.optionalgroup.a = 117
message.optional_nested_message.bb = 118
message.optional_foreign_message.c = 119
message.optional_import_message.d = 120
message.optional_nested_enum = unittest_pb2.TestAllTypes.BAZ
message.optional_foreign_enum = unittest_pb2.FOREIGN_BAZ
message.optional_import_enum = unittest_import_pb2.IMPORT_BAZ
message.optional_string_piece = '124'
message.optional_cord = '125'
#
# Repeated fields.
#
message.repeated_int32.append(201)
message.repeated_int64.append(202)
message.repeated_uint32.append(203)
message.repeated_uint64.append(204)
message.repeated_sint32.append(205)
message.repeated_sint64.append(206)
message.repeated_fixed32.append(207)
message.repeated_fixed64.append(208)
message.repeated_sfixed32.append(209)
message.repeated_sfixed64.append(210)
message.repeated_float.append(211)
message.repeated_double.append(212)
message.repeated_bool.append(True)
message.repeated_string.append('215')
message.repeated_bytes.append('216')
message.repeatedgroup.add().a = 217
message.repeated_nested_message.add().bb = 218
message.repeated_foreign_message.add().c = 219
message.repeated_import_message.add().d = 220
message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAR)
message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAR)
message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAR)
message.repeated_string_piece.append('224')
message.repeated_cord.append('225')
# Add a second one of each field.
message.repeated_int32.append(301)
message.repeated_int64.append(302)
message.repeated_uint32.append(303)
message.repeated_uint64.append(304)
message.repeated_sint32.append(305)
message.repeated_sint64.append(306)
message.repeated_fixed32.append(307)
message.repeated_fixed64.append(308)
message.repeated_sfixed32.append(309)
message.repeated_sfixed64.append(310)
message.repeated_float.append(311)
message.repeated_double.append(312)
message.repeated_bool.append(False)
message.repeated_string.append('315')
message.repeated_bytes.append('316')
message.repeatedgroup.add().a = 317
message.repeated_nested_message.add().bb = 318
message.repeated_foreign_message.add().c = 319
message.repeated_import_message.add().d = 320
message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAZ)
message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAZ)
message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAZ)
message.repeated_string_piece.append('324')
message.repeated_cord.append('325')
#
# Fields that have defaults.
#
message.default_int32 = 401
message.default_int64 = 402
message.default_uint32 = 403
message.default_uint64 = 404
message.default_sint32 = 405
message.default_sint64 = 406
message.default_fixed32 = 407
message.default_fixed64 = 408
message.default_sfixed32 = 409
message.default_sfixed64 = 410
message.default_float = 411
message.default_double = 412
message.default_bool = False
message.default_string = '415'
message.default_bytes = '416'
message.default_nested_enum = unittest_pb2.TestAllTypes.FOO
message.default_foreign_enum = unittest_pb2.FOREIGN_FOO
message.default_import_enum = unittest_import_pb2.IMPORT_FOO
message.default_string_piece = '424'
message.default_cord = '425'
def SetAllExtensions(message):
"""Sets every extension in the message to a unique value.
Args:
message: A unittest_pb2.TestAllExtensions instance.
"""
extensions = message.Extensions
pb2 = unittest_pb2
import_pb2 = unittest_import_pb2
#
# Optional fields.
#
extensions[pb2.optional_int32_extension] = 101
extensions[pb2.optional_int64_extension] = 102
extensions[pb2.optional_uint32_extension] = 103
extensions[pb2.optional_uint64_extension] = 104
extensions[pb2.optional_sint32_extension] = 105
extensions[pb2.optional_sint64_extension] = 106
extensions[pb2.optional_fixed32_extension] = 107
extensions[pb2.optional_fixed64_extension] = 108
extensions[pb2.optional_sfixed32_extension] = 109
extensions[pb2.optional_sfixed64_extension] = 110
extensions[pb2.optional_float_extension] = 111
extensions[pb2.optional_double_extension] = 112
extensions[pb2.optional_bool_extension] = True
extensions[pb2.optional_string_extension] = '115'
extensions[pb2.optional_bytes_extension] = '116'
extensions[pb2.optionalgroup_extension].a = 117
extensions[pb2.optional_nested_message_extension].bb = 118
extensions[pb2.optional_foreign_message_extension].c = 119
extensions[pb2.optional_import_message_extension].d = 120
extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ
extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ
extensions[pb2.optional_foreign_enum_extension] = pb2.FOREIGN_BAZ
extensions[pb2.optional_import_enum_extension] = import_pb2.IMPORT_BAZ
extensions[pb2.optional_string_piece_extension] = '124'
extensions[pb2.optional_cord_extension] = '125'
#
# Repeated fields.
#
extensions[pb2.repeated_int32_extension].append(201)
extensions[pb2.repeated_int64_extension].append(202)
extensions[pb2.repeated_uint32_extension].append(203)
extensions[pb2.repeated_uint64_extension].append(204)
extensions[pb2.repeated_sint32_extension].append(205)
extensions[pb2.repeated_sint64_extension].append(206)
extensions[pb2.repeated_fixed32_extension].append(207)
extensions[pb2.repeated_fixed64_extension].append(208)
extensions[pb2.repeated_sfixed32_extension].append(209)
extensions[pb2.repeated_sfixed64_extension].append(210)
extensions[pb2.repeated_float_extension].append(211)
extensions[pb2.repeated_double_extension].append(212)
extensions[pb2.repeated_bool_extension].append(True)
extensions[pb2.repeated_string_extension].append('215')
extensions[pb2.repeated_bytes_extension].append('216')
extensions[pb2.repeatedgroup_extension].add().a = 217
extensions[pb2.repeated_nested_message_extension].add().bb = 218
extensions[pb2.repeated_foreign_message_extension].add().c = 219
extensions[pb2.repeated_import_message_extension].add().d = 220
extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAR)
extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAR)
extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAR)
extensions[pb2.repeated_string_piece_extension].append('224')
extensions[pb2.repeated_cord_extension].append('225')
# Append a second one of each field.
extensions[pb2.repeated_int32_extension].append(301)
extensions[pb2.repeated_int64_extension].append(302)
extensions[pb2.repeated_uint32_extension].append(303)
extensions[pb2.repeated_uint64_extension].append(304)
extensions[pb2.repeated_sint32_extension].append(305)
extensions[pb2.repeated_sint64_extension].append(306)
extensions[pb2.repeated_fixed32_extension].append(307)
extensions[pb2.repeated_fixed64_extension].append(308)
extensions[pb2.repeated_sfixed32_extension].append(309)
extensions[pb2.repeated_sfixed64_extension].append(310)
extensions[pb2.repeated_float_extension].append(311)
extensions[pb2.repeated_double_extension].append(312)
extensions[pb2.repeated_bool_extension].append(False)
extensions[pb2.repeated_string_extension].append('315')
extensions[pb2.repeated_bytes_extension].append('316')
extensions[pb2.repeatedgroup_extension].add().a = 317
extensions[pb2.repeated_nested_message_extension].add().bb = 318
extensions[pb2.repeated_foreign_message_extension].add().c = 319
extensions[pb2.repeated_import_message_extension].add().d = 320
extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAZ)
extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAZ)
extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAZ)
extensions[pb2.repeated_string_piece_extension].append('324')
extensions[pb2.repeated_cord_extension].append('325')
#
# Fields with defaults.
#
extensions[pb2.default_int32_extension] = 401
extensions[pb2.default_int64_extension] = 402
extensions[pb2.default_uint32_extension] = 403
extensions[pb2.default_uint64_extension] = 404
extensions[pb2.default_sint32_extension] = 405
extensions[pb2.default_sint64_extension] = 406
extensions[pb2.default_fixed32_extension] = 407
extensions[pb2.default_fixed64_extension] = 408
extensions[pb2.default_sfixed32_extension] = 409
extensions[pb2.default_sfixed64_extension] = 410
extensions[pb2.default_float_extension] = 411
extensions[pb2.default_double_extension] = 412
extensions[pb2.default_bool_extension] = False
extensions[pb2.default_string_extension] = '415'
extensions[pb2.default_bytes_extension] = '416'
extensions[pb2.default_nested_enum_extension] = pb2.TestAllTypes.FOO
extensions[pb2.default_foreign_enum_extension] = pb2.FOREIGN_FOO
extensions[pb2.default_import_enum_extension] = import_pb2.IMPORT_FOO
extensions[pb2.default_string_piece_extension] = '424'
extensions[pb2.default_cord_extension] = '425'
def SetAllFieldsAndExtensions(message):
"""Sets every field and extension in the message to a unique value.
Args:
message: A unittest_pb2.TestAllExtensions message.
"""
message.my_int = 1
message.my_string = 'foo'
message.my_float = 1.0
message.Extensions[unittest_pb2.my_extension_int] = 23
message.Extensions[unittest_pb2.my_extension_string] = 'bar'
def ExpectAllFieldsAndExtensionsInOrder(serialized):
"""Ensures that serialized is the serialization we expect for a message
filled with SetAllFieldsAndExtensions(). (Specifically, ensures that the
serialization is in canonical, tag-number order).
"""
my_extension_int = unittest_pb2.my_extension_int
my_extension_string = unittest_pb2.my_extension_string
expected_strings = []
message = unittest_pb2.TestFieldOrderings()
message.my_int = 1 # Field 1.
expected_strings.append(message.SerializeToString())
message.Clear()
message.Extensions[my_extension_int] = 23 # Field 5.
expected_strings.append(message.SerializeToString())
message.Clear()
message.my_string = 'foo' # Field 11.
expected_strings.append(message.SerializeToString())
message.Clear()
message.Extensions[my_extension_string] = 'bar' # Field 50.
expected_strings.append(message.SerializeToString())
message.Clear()
message.my_float = 1.0
expected_strings.append(message.SerializeToString())
message.Clear()
expected = ''.join(expected_strings)
if expected != serialized:
raise ValueError('Expected %r, found %r' % (expected, serialized))
class GoldenMessageTestCase(unittest.TestCase):
"""This adds methods to TestCase useful for verifying our Golden Message."""
def ExpectAllFieldsSet(self, message):
"""Check all fields for correct values have after Set*Fields() is called."""
self.assertTrue(message.HasField('optional_int32'))
self.assertTrue(message.HasField('optional_int64'))
self.assertTrue(message.HasField('optional_uint32'))
self.assertTrue(message.HasField('optional_uint64'))
self.assertTrue(message.HasField('optional_sint32'))
self.assertTrue(message.HasField('optional_sint64'))
self.assertTrue(message.HasField('optional_fixed32'))
self.assertTrue(message.HasField('optional_fixed64'))
self.assertTrue(message.HasField('optional_sfixed32'))
self.assertTrue(message.HasField('optional_sfixed64'))
self.assertTrue(message.HasField('optional_float'))
self.assertTrue(message.HasField('optional_double'))
self.assertTrue(message.HasField('optional_bool'))
self.assertTrue(message.HasField('optional_string'))
self.assertTrue(message.HasField('optional_bytes'))
self.assertTrue(message.HasField('optionalgroup'))
self.assertTrue(message.HasField('optional_nested_message'))
self.assertTrue(message.HasField('optional_foreign_message'))
self.assertTrue(message.HasField('optional_import_message'))
self.assertTrue(message.optionalgroup.HasField('a'))
self.assertTrue(message.optional_nested_message.HasField('bb'))
self.assertTrue(message.optional_foreign_message.HasField('c'))
self.assertTrue(message.optional_import_message.HasField('d'))
self.assertTrue(message.HasField('optional_nested_enum'))
self.assertTrue(message.HasField('optional_foreign_enum'))
self.assertTrue(message.HasField('optional_import_enum'))
self.assertTrue(message.HasField('optional_string_piece'))
self.assertTrue(message.HasField('optional_cord'))
self.assertEqual(101, message.optional_int32)
self.assertEqual(102, message.optional_int64)
self.assertEqual(103, message.optional_uint32)
self.assertEqual(104, message.optional_uint64)
self.assertEqual(105, message.optional_sint32)
self.assertEqual(106, message.optional_sint64)
self.assertEqual(107, message.optional_fixed32)
self.assertEqual(108, message.optional_fixed64)
self.assertEqual(109, message.optional_sfixed32)
self.assertEqual(110, message.optional_sfixed64)
self.assertEqual(111, message.optional_float)
self.assertEqual(112, message.optional_double)
self.assertEqual(True, message.optional_bool)
self.assertEqual('115', message.optional_string)
self.assertEqual('116', message.optional_bytes)
self.assertEqual(117, message.optionalgroup.a);
self.assertEqual(118, message.optional_nested_message.bb)
self.assertEqual(119, message.optional_foreign_message.c)
self.assertEqual(120, message.optional_import_message.d)
self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
message.optional_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_BAZ, message.optional_foreign_enum)
self.assertEqual(unittest_import_pb2.IMPORT_BAZ,
message.optional_import_enum)
# -----------------------------------------------------------------
self.assertEqual(2, len(message.repeated_int32))
self.assertEqual(2, len(message.repeated_int64))
self.assertEqual(2, len(message.repeated_uint32))
self.assertEqual(2, len(message.repeated_uint64))
self.assertEqual(2, len(message.repeated_sint32))
self.assertEqual(2, len(message.repeated_sint64))
self.assertEqual(2, len(message.repeated_fixed32))
self.assertEqual(2, len(message.repeated_fixed64))
self.assertEqual(2, len(message.repeated_sfixed32))
self.assertEqual(2, len(message.repeated_sfixed64))
self.assertEqual(2, len(message.repeated_float))
self.assertEqual(2, len(message.repeated_double))
self.assertEqual(2, len(message.repeated_bool))
self.assertEqual(2, len(message.repeated_string))
self.assertEqual(2, len(message.repeated_bytes))
self.assertEqual(2, len(message.repeatedgroup))
self.assertEqual(2, len(message.repeated_nested_message))
self.assertEqual(2, len(message.repeated_foreign_message))
self.assertEqual(2, len(message.repeated_import_message))
self.assertEqual(2, len(message.repeated_nested_enum))
self.assertEqual(2, len(message.repeated_foreign_enum))
self.assertEqual(2, len(message.repeated_import_enum))
self.assertEqual(2, len(message.repeated_string_piece))
self.assertEqual(2, len(message.repeated_cord))
self.assertEqual(201, message.repeated_int32[0])
self.assertEqual(202, message.repeated_int64[0])
self.assertEqual(203, message.repeated_uint32[0])
self.assertEqual(204, message.repeated_uint64[0])
self.assertEqual(205, message.repeated_sint32[0])
self.assertEqual(206, message.repeated_sint64[0])
self.assertEqual(207, message.repeated_fixed32[0])
self.assertEqual(208, message.repeated_fixed64[0])
self.assertEqual(209, message.repeated_sfixed32[0])
self.assertEqual(210, message.repeated_sfixed64[0])
self.assertEqual(211, message.repeated_float[0])
self.assertEqual(212, message.repeated_double[0])
self.assertEqual(True, message.repeated_bool[0])
self.assertEqual('215', message.repeated_string[0])
self.assertEqual('216', message.repeated_bytes[0])
self.assertEqual(217, message.repeatedgroup[0].a)
self.assertEqual(218, message.repeated_nested_message[0].bb)
self.assertEqual(219, message.repeated_foreign_message[0].c)
self.assertEqual(220, message.repeated_import_message[0].d)
self.assertEqual(unittest_pb2.TestAllTypes.BAR,
message.repeated_nested_enum[0])
self.assertEqual(unittest_pb2.FOREIGN_BAR,
message.repeated_foreign_enum[0])
self.assertEqual(unittest_import_pb2.IMPORT_BAR,
message.repeated_import_enum[0])
self.assertEqual(301, message.repeated_int32[1])
self.assertEqual(302, message.repeated_int64[1])
self.assertEqual(303, message.repeated_uint32[1])
self.assertEqual(304, message.repeated_uint64[1])
self.assertEqual(305, message.repeated_sint32[1])
self.assertEqual(306, message.repeated_sint64[1])
self.assertEqual(307, message.repeated_fixed32[1])
self.assertEqual(308, message.repeated_fixed64[1])
self.assertEqual(309, message.repeated_sfixed32[1])
self.assertEqual(310, message.repeated_sfixed64[1])
self.assertEqual(311, message.repeated_float[1])
self.assertEqual(312, message.repeated_double[1])
self.assertEqual(False, message.repeated_bool[1])
self.assertEqual('315', message.repeated_string[1])
self.assertEqual('316', message.repeated_bytes[1])
self.assertEqual(317, message.repeatedgroup[1].a)
self.assertEqual(318, message.repeated_nested_message[1].bb)
self.assertEqual(319, message.repeated_foreign_message[1].c)
self.assertEqual(320, message.repeated_import_message[1].d)
self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
message.repeated_nested_enum[1])
self.assertEqual(unittest_pb2.FOREIGN_BAZ,
message.repeated_foreign_enum[1])
self.assertEqual(unittest_import_pb2.IMPORT_BAZ,
message.repeated_import_enum[1])
# -----------------------------------------------------------------
self.assertTrue(message.HasField('default_int32'))
self.assertTrue(message.HasField('default_int64'))
self.assertTrue(message.HasField('default_uint32'))
self.assertTrue(message.HasField('default_uint64'))
self.assertTrue(message.HasField('default_sint32'))
self.assertTrue(message.HasField('default_sint64'))
self.assertTrue(message.HasField('default_fixed32'))
self.assertTrue(message.HasField('default_fixed64'))
self.assertTrue(message.HasField('default_sfixed32'))
self.assertTrue(message.HasField('default_sfixed64'))
self.assertTrue(message.HasField('default_float'))
self.assertTrue(message.HasField('default_double'))
self.assertTrue(message.HasField('default_bool'))
self.assertTrue(message.HasField('default_string'))
self.assertTrue(message.HasField('default_bytes'))
self.assertTrue(message.HasField('default_nested_enum'))
self.assertTrue(message.HasField('default_foreign_enum'))
self.assertTrue(message.HasField('default_import_enum'))
self.assertEqual(401, message.default_int32)
self.assertEqual(402, message.default_int64)
self.assertEqual(403, message.default_uint32)
self.assertEqual(404, message.default_uint64)
self.assertEqual(405, message.default_sint32)
self.assertEqual(406, message.default_sint64)
self.assertEqual(407, message.default_fixed32)
self.assertEqual(408, message.default_fixed64)
self.assertEqual(409, message.default_sfixed32)
self.assertEqual(410, message.default_sfixed64)
self.assertEqual(411, message.default_float)
self.assertEqual(412, message.default_double)
self.assertEqual(False, message.default_bool)
self.assertEqual('415', message.default_string)
self.assertEqual('416', message.default_bytes)
self.assertEqual(unittest_pb2.TestAllTypes.FOO, message.default_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_FOO, message.default_foreign_enum)
self.assertEqual(unittest_import_pb2.IMPORT_FOO,
message.default_import_enum)
def GoldenFile(filename):
"""Finds the given golden file and returns a file object representing it."""
# Search up the directory tree looking for the C++ protobuf source code.
path = '.'
while os.path.exists(path):
if os.path.exists(os.path.join(path, 'src/google/protobuf')):
# Found it. Load the golden file from the testdata directory.
full_path = os.path.join(path, 'src/google/protobuf/testdata', filename)
return open(full_path, 'rb')
path = os.path.join(path, '..')
raise RuntimeError(
'Could not find golden files. This test must be run from within the '
'protobuf source package so that it can read test data files from the '
'C++ source tree.')
def SetAllPackedFields(message):
"""Sets every field in the message to a unique value.
Args:
message: A unittest_pb2.TestPackedTypes instance.
"""
message.packed_int32.extend([101, 102])
message.packed_int64.extend([103, 104])
message.packed_uint32.extend([105, 106])
message.packed_uint64.extend([107, 108])
message.packed_sint32.extend([109, 110])
message.packed_sint64.extend([111, 112])
message.packed_fixed32.extend([113, 114])
message.packed_fixed64.extend([115, 116])
message.packed_sfixed32.extend([117, 118])
message.packed_sfixed64.extend([119, 120])
message.packed_float.extend([121.0, 122.0])
message.packed_double.extend([122.0, 123.0])
message.packed_bool.extend([True, False])
message.packed_enum.extend([unittest_pb2.FOREIGN_FOO,
unittest_pb2.FOREIGN_BAR])
def SetAllPackedExtensions(message):
"""Sets every extension in the message to a unique value.
Args:
message: A unittest_pb2.TestPackedExtensions instance.
"""
extensions = message.Extensions
pb2 = unittest_pb2
extensions[pb2.packed_int32_extension].append(101)
extensions[pb2.packed_int64_extension].append(102)
extensions[pb2.packed_uint32_extension].append(103)
extensions[pb2.packed_uint64_extension].append(104)
extensions[pb2.packed_sint32_extension].append(105)
extensions[pb2.packed_sint64_extension].append(106)
extensions[pb2.packed_fixed32_extension].append(107)
extensions[pb2.packed_fixed64_extension].append(108)
extensions[pb2.packed_sfixed32_extension].append(109)
extensions[pb2.packed_sfixed64_extension].append(110)
extensions[pb2.packed_float_extension].append(111.0)
extensions[pb2.packed_double_extension].append(112.0)
extensions[pb2.packed_bool_extension].append(True)
extensions[pb2.packed_enum_extension].append(pb2.FOREIGN_BAZ)
| TextusData/Mover | thirdparty/protobuf-2.2.0/python/google/protobuf/internal/test_util.py | Python | gpl-3.0 | 25,634 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Serializers for PyArrow and pandas conversions. See `pyspark.serializers` for more details.
"""
from pyspark.serializers import Serializer, read_int, write_int, UTF8Deserializer
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class ArrowCollectSerializer(Serializer):
"""
Deserialize a stream of batches followed by batch order information. Used in
PandasConversionMixin._collect_as_arrow() after invoking Dataset.collectAsArrowToPython()
in the JVM.
"""
def __init__(self):
self.serializer = ArrowStreamSerializer()
def dump_stream(self, iterator, stream):
return self.serializer.dump_stream(iterator, stream)
def load_stream(self, stream):
"""
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
"""
# load the batches
for batch in self.serializer.load_stream(stream):
yield batch
# load the batch order indices or propagate any error that occurred in the JVM
num = read_int(stream)
if num == -1:
error_msg = UTF8Deserializer().loads(stream)
raise RuntimeError("An error occurred while calling "
"ArrowCollectSerializer.load_stream: {}".format(error_msg))
batch_order = []
for i in range(num):
index = read_int(stream)
batch_order.append(index)
yield batch_order
def __repr__(self):
return "ArrowCollectSerializer(%s)" % self.serializer
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
class ArrowStreamPandasSerializer(ArrowStreamSerializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
Parameters
----------
timezone : str
A timezone to respect when handling timestamp values
safecheck : bool
If True, conversion from Arrow to Pandas checks for overflow/truncation
assign_cols_by_name : bool
If True, then Pandas DataFrames will get columns by name
"""
def __init__(self, timezone, safecheck, assign_cols_by_name):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
self._safecheck = safecheck
self._assign_cols_by_name = assign_cols_by_name
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
_convert_map_items_to_dict
import pyarrow
# If the given column is a date type column, creates a series of datetime.date directly
# instead of creating datetime64[ns] as intermediate data to avoid overflow caused by
# datetime64[ns] type handling.
s = arrow_column.to_pandas(date_as_object=True)
if pyarrow.types.is_timestamp(arrow_column.type):
return _check_series_localize_timestamps(s, self._timezone)
elif pyarrow.types.is_map(arrow_column.type):
return _convert_map_items_to_dict(s)
else:
return s
def _create_batch(self, series):
"""
Create an Arrow record batch from the given pandas.Series or list of Series,
with optional type.
Parameters
----------
series : pandas.Series or list
A single series, list of series, or list of (series, arrow_type)
Returns
-------
pyarrow.RecordBatch
Arrow RecordBatch
"""
import pandas as pd
import pyarrow as pa
from pyspark.sql.pandas.types import _check_series_convert_timestamps_internal, \
_convert_dict_to_map_items
from pandas.api.types import is_categorical_dtype
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s, self._timezone)
elif t is not None and pa.types.is_map(t):
s = _convert_dict_to_map_items(s)
elif is_categorical_dtype(s.dtype):
# Note: This can be removed once minimum pyarrow version is >= 0.16.1
s = s.astype(s.dtypes.categories.dtype)
try:
array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck)
except ValueError as e:
if self._safecheck:
error_msg = "Exception thrown when converting pandas.Series (%s) to " + \
"Arrow Array (%s). It can be caused by overflows or other " + \
"unsafe conversions warned by Arrow. Arrow safe type check " + \
"can be disabled by using SQL config " + \
"`spark.sql.execution.pandas.convertToArrowArraySafely`."
raise ValueError(error_msg % (s.dtype, t)) from e
else:
raise e
return array
arrs = []
for s, t in series:
if t is not None and pa.types.is_struct(t):
if not isinstance(s, pd.DataFrame):
raise ValueError("A field of type StructType expects a pandas.DataFrame, "
"but got: %s" % str(type(s)))
# Input partition and result pandas.DataFrame empty, make empty Arrays with struct
if len(s) == 0 and len(s.columns) == 0:
arrs_names = [(pa.array([], type=field.type), field.name) for field in t]
# Assign result columns by schema name if user labeled with strings
elif self._assign_cols_by_name and any(isinstance(name, str)
for name in s.columns):
arrs_names = [(create_array(s[field.name], field.type), field.name)
for field in t]
# Assign result columns by position
else:
arrs_names = [(create_array(s[s.columns[i]], field.type), field.name)
for i, field in enumerate(t)]
struct_arrs, struct_names = zip(*arrs_names)
arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names))
else:
arrs.append(create_array(s, t))
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in range(len(arrs))])
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
batches = (self._create_batch(series) for series in iterator)
super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
import pyarrow as pa
for batch in batches:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class ArrowStreamPandasUDFSerializer(ArrowStreamPandasSerializer):
"""
Serializer used by Python worker to evaluate Pandas UDFs
"""
def __init__(self, timezone, safecheck, assign_cols_by_name, df_for_struct=False):
super(ArrowStreamPandasUDFSerializer, self) \
.__init__(timezone, safecheck, assign_cols_by_name)
self._df_for_struct = df_for_struct
def arrow_to_pandas(self, arrow_column):
import pyarrow.types as types
if self._df_for_struct and types.is_struct(arrow_column.type):
import pandas as pd
series = [super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(column)
.rename(field.name)
for column, field in zip(arrow_column.flatten(), arrow_column.type)]
s = pd.concat(series, axis=1)
else:
s = super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(arrow_column)
return s
def dump_stream(self, iterator, stream):
"""
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
"""
def init_stream_yield_batches():
should_write_start_length = True
for series in iterator:
batch = self._create_batch(series)
if should_write_start_length:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
should_write_start_length = False
yield batch
return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
def __repr__(self):
return "ArrowStreamPandasUDFSerializer"
class CogroupUDFSerializer(ArrowStreamPandasUDFSerializer):
def load_stream(self, stream):
"""
Deserialize Cogrouped ArrowRecordBatches to a tuple of Arrow tables and yield as two
lists of pandas.Series.
"""
import pyarrow as pa
dataframes_in_group = None
while dataframes_in_group is None or dataframes_in_group > 0:
dataframes_in_group = read_int(stream)
if dataframes_in_group == 2:
batch1 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
batch2 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
yield (
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch1).itercolumns()],
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch2).itercolumns()]
)
elif dataframes_in_group != 0:
raise ValueError(
'Invalid number of pandas.DataFrames in group {0}'.format(dataframes_in_group))
| wangmiao1981/spark | python/pyspark/sql/pandas/serializers.py | Python | apache-2.0 | 12,308 |
#!/usr/bin/env python3
import os, sys
import unittest
from gppylib import gplog
from gpsegstart import GpSegStart
from mock import patch
logger = gplog.get_unittest_logger()
class GpSegStartTestCase(unittest.TestCase):
@patch('gpsegstart.GpSegStart.getOverallStatusKeys', return_value=[])
@patch('gpsegstart.gp.GpVersion.local', return_value=None)
@patch('gpsegstart.base.WorkerPool')
def test_check_postmasters_01(self, mk1, mk2, mk3):
db = '1|1|p|p|s|u|cdw|cdw-1|2000|/data/gpseg-1s'
gpseg = GpSegStart([db], None, 'col1:col2:col3', 'quiescent', None, None, None, None, None, None, None, 1)
result = gpseg.checkPostmasters(False)
self.assertTrue(result)
@patch('gpsegstart.GpSegStart.getOverallStatusKeys', return_value=['foo1', 'foo2'])
@patch('gpsegstart.gp.check_pid', return_value=False)
@patch('gpsegstart.gp.GpVersion.local', return_value=None)
@patch('gpsegstart.base.WorkerPool')
def test_check_postmasters_02(self, mk1, mk2, mk3, mk4):
db = '1|1|p|p|s|u|cdw|cdw-1|2000|/data/gpseg-1s'
gpseg = GpSegStart([db], None, 'col1:col2:col3', 'quiescent', None, None, None, None, None, None, None, 1)
result = gpseg.checkPostmasters(False)
self.assertFalse(result)
@patch('gpsegstart.GpSegStart.getOverallStatusKeys', return_value=['foo1', 'foo2'])
@patch('gpsegstart.gp.check_pid', side_effect=[False, True])
@patch('gpsegstart.gp.GpVersion.local', return_value=None)
@patch('gpsegstart.base.WorkerPool')
def test_check_postmasters_03(self, mk1, mk2, mk3, mk4):
db = '1|1|p|p|s|u|cdw|cdw-1|2000|/data/gpseg-1s'
gpseg = GpSegStart([db], None, 'col1:col2:col3', 'quiescent', None, None, None, None, None, None, None, 1)
result = gpseg.checkPostmasters(False)
self.assertFalse(result)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
unittest.main()
| 50wu/gpdb | gpMgmt/bin/gppylib/test/unit/test_cluster_gpsegstart.py | Python | apache-2.0 | 1,966 |
# coding: utf-8
import os
from gppylib.db import dbconn
from gppylib.test.behave_utils.utils import run_gpcommand
from gppylib.gparray import GpArray
from gppylib.test.behave_utils.utils import get_all_hostnames_as_list
from gppylib.operations.backup_utils import generate_report_filename, generate_global_filename, generate_cdatabase_filename, \
get_backup_directory, generate_master_config_filename, generate_segment_config_filename, \
generate_ao_state_filename, generate_co_state_filename, generate_pgstatlastoperation_filename
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
@given('the NetBackup "{ver}" libraries are loaded')
def impl(context, ver):
hosts = set(get_all_hostnames_as_list(context, 'template1'))
gphome = os.environ.get('GPHOME')
if ver == '7.5':
cpCmd = 'cp -f %s/lib/nbu75/lib/* %s/lib/' % (gphome, gphome)
elif ver == '7.1':
cpCmd = 'cp -f %s/lib/nbu71/lib/* %s/lib/' % (gphome, gphome)
for host in hosts:
cmd = Command(name='Copy NBU lib files',
cmdStr=cpCmd,
ctxt=REMOTE,
remoteHost=host)
cmd.run(validateAfter=True)
@when('the user runs "{cmd_str}" using netbackup')
def impl(context, cmd_str):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if hasattr(context, 'netbackup_policy'):
netbackup_policy = context.netbackup_policy
if hasattr(context, 'netbackup_schedule'):
netbackup_schedule = context.netbackup_schedule
bnr_tool = cmd_str.split()[0].strip()
if bnr_tool == 'gpcrondump':
command_str = cmd_str + " --netbackup-service-host " + netbackup_service_host + " --netbackup-policy " + netbackup_policy + " --netbackup-schedule " + netbackup_schedule
elif bnr_tool == 'gpdbrestore':
command_str = cmd_str + " --netbackup-service-host " + netbackup_service_host
elif bnr_tool == 'gp_dump':
command_str = cmd_str + " --netbackup-service-host " + netbackup_service_host + " --netbackup-policy " + netbackup_policy + " --netbackup-schedule " + netbackup_schedule
elif bnr_tool == 'gp_restore':
command_str = cmd_str + " --netbackup-service-host " + netbackup_service_host
run_gpcommand(context, command_str)
@when('the user runs backup command "{cmd}" using netbackup')
def impl(context, cmd):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if hasattr(context, 'netbackup_policy'):
netbackup_policy = context.netbackup_policy
if hasattr(context, 'netbackup_schedule'):
netbackup_schedule = context.netbackup_schedule
command_str = cmd + " --netbackup-service-host " + netbackup_service_host + " --netbackup-policy " + netbackup_policy + " --netbackup-schedule " + netbackup_schedule
run_command(context, command_str)
@when('the user runs restore command "{cmd}" using netbackup')
def impl(context, cmd):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
command_str = cmd + " --netbackup-service-host " + netbackup_service_host
run_command(context, command_str)
@when('the user runs gpdbrestore with the stored timestamp using netbackup')
def impl(context):
if hasattr(context, 'backup_timestamp'):
ts = context.backup_timestamp
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
command = 'gpdbrestore -e -a -t ' + ts + " --netbackup-service-host " + netbackup_service_host
run_gpcommand(context, command)
@when('the user runs gpdbrestore with the stored timestamp and options "{options}" using netbackup')
def impl(context, options):
if hasattr(context, 'backup_timestamp'):
ts = context.backup_timestamp
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if options == '-b':
command = 'gpdbrestore -e -b %s -a --netbackup-service-host %s' % (ts[0:8], netbackup_service_host)
else:
command = 'gpdbrestore -e -t %s %s -a --netbackup-service-host %s' % (ts, options, netbackup_service_host)
run_gpcommand(context, command)
@when('the user runs gpdbrestore with the stored timestamp and options "{options}" without -e option using netbackup')
def impl(context, options):
if hasattr(context, 'backup_timestamp'):
ts = context.backup_timestamp
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if options == '-b':
command = 'gpdbrestore -b %s -a --netbackup-service-host %s' % (context.backup_timestamp[0:8], netbackup_service_host)
else:
command = 'gpdbrestore -t %s %s -a --netbackup-service-host %s' % (context.backup_timestamp, options, netbackup_service_host)
run_gpcommand(context, command)
@when('the user runs gpdbrestore with "{opt}" option in path "{path}" using netbackup')
def impl(context, opt, path):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
command = 'gpdbrestore -e -a %s localhost:%s/db_dumps/%s --netbackup-service-host %s' % (opt, path, context.backup_subdir, netbackup_service_host)
run_gpcommand(context, command)
@when('the user runs gp_restore with the the stored timestamp and subdir in "{dbname}" using netbackup')
def impl(context, dbname):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
command = 'gp_restore -i --gp-k %s --gp-d db_dumps/%s --gp-i --gp-r db_dumps/%s --gp-l=p -d %s --gp-c --netbackup-service-host %s' % (context.backup_timestamp, context.backup_subdir, context.backup_subdir, dbname, netbackup_service_host)
run_gpcommand(context, command)
@then('verify that the config files are backed up with the stored timestamp using netbackup')
def impl(context):
if hasattr(context, 'backup_timestamp'):
ts = context.backup_timestamp
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if not hasattr(context, "dump_prefix"):
context.dump_prefix = ''
master_config_filename = os.path.join(master_data_dir, 'db_dumps', context.backup_timestamp[0:8],
'%sgp_master_config_files_%s.tar' % (context.dump_prefix, ts))
command_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, master_config_filename)
cmd = Command('Validate master config file', command_str)
cmd.run(validateAfter=True)
results = cmd.get_results().stdout.strip()
if results != master_config_filename:
raise Exception('Expected Master Config file: %s and found: %s. Master Config file was not backup up to NetBackup server' % (master_config_filename, results))
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
for seg in primary_segs:
segment_config_filename = os.path.join(seg.getSegmentDataDirectory(), 'db_dumps', context.backup_timestamp[0:8],
'%sgp_segment_config_files_0_%s_%s.tar' % (context.dump_prefix, seg.getSegmentDbId(), context.backup_timestamp))
command_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, segment_config_filename)
cmd = Command('Validate segment config file', command_str, ctxt=REMOTE, remoteHost = seg.getSegmentHostName())
cmd.run(validateAfter=True)
results = cmd.get_results().stdout.strip()
if results != segment_config_filename:
raise Exception('Expected Segment Config file: %s and found: %s. Segment Config file was not backup up to NetBackup server' % (segment_config_filename, results))
@when('the user runs gpdbrestore with the stored timestamp to print the backup set with options "{options}" using netbackup')
def impl(context, options):
if hasattr(context, 'backup_timestamp'):
ts = context.backup_timestamp
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
command = 'gpdbrestore -t %s %s --list-backup --netbackup-service-host %s' % (ts, options, netbackup_service_host)
run_gpcommand(context, command)
@when('the user runs gp_restore with the the stored timestamp and subdir in "{dbname}" and bypasses ao stats using netbackup')
def impl(context, dbname):
if hasattr(context, 'backup_timestamp'):
ts = context.backup_timestamp
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
command = 'gp_restore -i --gp-k %s --gp-d db_dumps/%s --gp-i --gp-r db_dumps/%s --gp-l=p -d %s --gp-c --gp-nostats --netbackup-service-host %s' % (ts, context.backup_subdir, context.backup_subdir, dbname, netbackup_service_host)
run_gpcommand(context, command)
@when('the user runs gp_restore with the the stored timestamp and subdir for metadata only in "{dbname}" using netbackup')
def impl(context, dbname):
if hasattr(context, 'backup_timestamp'):
ts = context.backup_timestamp
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
command = 'gp_restore -i --gp-k %s --gp-d db_dumps/%s --gp-i --gp-r db_dumps/%s --gp-l=p -d %s --gp-c -s db_dumps/%s/gp_dump_1_1_%s.gz --netbackup-service-host %s' % \
(ts, context.backup_subdir, context.backup_subdir, dbname, context.backup_subdir, ts, netbackup_service_host)
run_gpcommand(context, command)
@when('the user runs gpdbrestore with the backup list stored timestamp and options "{options}" using netbackup')
def impl(context, options):
if hasattr(context, 'backup_timestamp_list'):
ts = context.backup_timestamp_list.pop(0)
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if options == '-b':
command = 'gpdbrestore -e -b %s -a --netbackup-service-host %s' % (ts[0:8], netbackup_service_host)
else:
command = 'gpdbrestore -e -t %s %s -a --netbackup-service-host %s' % (ts, options, netbackup_service_host)
run_gpcommand(context, command)
@given('verify that {filetype} file has been backed up using nebBackup')
@when('verify that {filetype} file has been backed up using netbackup')
@then('verify that {filetype} file has been backed up using netbackup')
def impl(context, filetype):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if hasattr(context, 'backup_timestamp'):
backup_timestamp = context.backup_timestamp
if hasattr(context, 'backup_dir'):
backup_dir = context.backup_dir
#dump_dir = os.path.join(backup_dir, 'db_dumps', '%s' % (backup_timestamp[0:8]))
dump_dir = os.path.join(backup_dir, 'db_dumps')
else:
backup_dir = None
#dump_dir = os.path.join(master_data_dir, 'db_dumps', '%s' % (backup_timestamp[0:8]))
dump_dir = os.path.join(master_data_dir, 'db_dumps')
if filetype == 'report':
filename = generate_report_filename(master_data_dir, backup_dir, 'db_dumps', '', backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for report file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Report file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
elif filetype == 'global':
filename = generate_global_filename(master_data_dir, backup_dir, 'db_dumps', '', backup_timestamp[0:8], backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for global file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Global file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
elif filetype == 'config':
use_dir = get_backup_directory(master_data_dir, backup_dir, 'db_dumps', backup_timestamp)
master_config_filename = os.path.join(use_dir, "%s" % generate_master_config_filename('', backup_timestamp))
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, master_config_filename)
cmd = Command("Querying NetBackup server for master config file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != master_config_filename:
raise Exception('Master config file %s was not backup up to NetBackup server %s successfully' % (master_config_filename, netbackup_service_host))
master_port = os.environ.get('PGPORT')
gparray = GpArray.initFromCatalog(dbconn.DbURL(port = master_port), utility=True)
segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
for seg in segs:
use_dir = get_backup_directory(seg.getSegmentDataDirectory(), backup_dir, 'db_dumps', backup_timestamp)
seg_config_filename = os.path.join(use_dir, "%s" % generate_segment_config_filename('', seg.getSegmentDbId(), backup_timestamp))
seg_host = seg.getSegmentHostName()
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, seg_config_filename)
cmd = Command("Querying NetBackup server for segment config file", cmd_str, ctxt=REMOTE, remoteHost=seg_host)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != seg_config_filename:
raise Exception('Segment config file %s was not backup up to NetBackup server %s successfully' % (seg_config_filename, netbackup_service_host))
elif filetype == 'state':
filename = generate_ao_state_filename(master_data_dir, backup_dir, 'db_dumps', '', backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for AO state file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('AO state file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
filename = generate_co_state_filename(master_data_dir, backup_dir, 'db_dumps', '', backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for CO state file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('CO state file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
filename = generate_pgstatlastoperation_filename(master_data_dir, backup_dir, 'db_dumps', '', backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for last operation state file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Last operation state file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
elif filetype == 'cdatabase':
filename = generate_cdatabase_filename(master_data_dir, backup_dir, 'db_dumps', '', backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for cdatabase file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Cdatabase file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
@when('the user runs the "{cmd}" in a worker pool "{poolname}" using netbackup')
def impl(context, cmd, poolname):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if hasattr(context, 'netbackup_policy'):
netbackup_policy = context.netbackup_policy
if hasattr(context, 'netbackup_schedule'):
netbackup_schedule = context.netbackup_schedule
cmd = cmd + " --netbackup-service-host " + netbackup_service_host + " --netbackup-policy " + netbackup_policy + " --netbackup-schedule " + netbackup_schedule
command = Command(name='run gpcrondump in a separate thread', cmdStr=cmd)
pool = WorkerPool(numWorkers=1)
pool.addCommand(command)
if not hasattr(context, 'pool'):
context.pool = {}
context.pool[poolname] = pool
context.cmd = cmd
@then('the timestamps for database dumps are stored in a list')
def impl(context):
context.ts_list = get_timestamps_from_output(context)
def get_timestamps_from_output(context):
ts_list = []
stdout = context.stdout_message
for line in stdout.splitlines():
if 'Timestamp key = ' in line:
log_msg, delim, timestamp = line.partition('=')
ts = timestamp.strip()
validate_timestamp(ts)
ts_list.append(ts)
if ts_list is not []:
return ts_list
else:
raise Exception('Timestamp not found %s' % stdout)
@when('the user runs gpdbrestore for the database "{dbname}" with the stored timestamp using netbackup')
def impl(context, dbname):
if hasattr(context, 'db_timestamps'):
db_timestamps = context.db_timestamps
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
ts = db_timestamps[dbname]
command = 'gpdbrestore -e -a -t ' + ts + " --netbackup-service-host " + netbackup_service_host
run_gpcommand(context, command)
@given('verify that {filetype} file with prefix "{prefix}" under subdir "{subdir}" has been backed up using netbackup')
@when('verify that {filetype} file with prefix "{prefix}" under subdir "{subdir}" has been backed up using netbackup')
@then('verify that {filetype} file with prefix "{prefix}" under subdir "{subdir}" has been backed up using netbackup')
def impl(context, filetype, prefix, subdir):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
if hasattr(context, 'backup_timestamp'):
backup_timestamp = context.backup_timestamp
subdir = subdir.strip()
if len(subdir) > 0:
dump_dir = os.path.join(subdir, 'db_dumps', '%s' % (backup_timestamp[0:8]))
else:
dump_dir = os.path.join(master_data_dir, 'db_dumps', '%s' % (backup_timestamp[0:8]))
prefix = prefix.strip()
if len(prefix) > 0:
prefix = prefix + '_'
if filetype == 'report':
#use_dir = get_backup_directory(master_data_dir, subdir, 'db_dumps', backup_timestamp)
filename = "%s/%sgp_dump_%s.rpt" % (dump_dir, prefix, backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for report file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Report file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
elif filetype == 'global':
filename = os.path.join(dump_dir, "%sgp_global_1_1_%s" % (prefix, backup_timestamp))
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for global file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Global file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
elif filetype == 'config':
use_dir = get_backup_directory(master_data_dir, subdir, 'db_dumps', backup_timestamp)
master_config_filename = os.path.join(dump_dir, "%sgp_master_config_files_%s.tar" % (prefix, backup_timestamp))
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, master_config_filename)
cmd = Command("Querying NetBackup server for master config file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != master_config_filename:
raise Exception('Master config file %s was not backup up to NetBackup server %s successfully' % (master_config_filename, netbackup_service_host))
master_port = os.environ.get('PGPORT')
gparray = GpArray.initFromCatalog(dbconn.DbURL(port = master_port), utility=True)
segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
for seg in segs:
use_dir = get_backup_directory(seg.getSegmentDataDirectory(), subdir, 'db_dumps', backup_timestamp)
seg_config_filename = os.path.join(use_dir, "%sgp_segment_config_files_0_%d_%s.tar" % (prefix, seg.getSegmentDbId(), backup_timestamp))
seg_host = seg.getSegmentHostName()
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, seg_config_filename)
cmd = Command("Querying NetBackup server for segment config file", cmd_str, ctxt=REMOTE, remoteHost=seg_host)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != seg_config_filename:
raise Exception('Segment config file %s was not backup up to NetBackup server %s successfully' % (seg_config_filename, netbackup_service_host))
elif filetype == 'state':
filename = "%s/%sgp_dump_%s_ao_state_file" % (dump_dir, prefix, backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for AO state file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('AO state file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
filename = "%s/%sgp_dump_%s_co_state_file" % (dump_dir, prefix, backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for CO state file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('CO state file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
filename = "%s/%sgp_dump_%s_last_operation" % (dump_dir, prefix, backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for last operation state file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Last operation state file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
elif filetype == 'cdatabase':
filename = "%s/%sgp_cdatabase_1_1_%s" % (dump_dir, prefix, backup_timestamp)
cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (netbackup_service_host, filename)
cmd = Command("Querying NetBackup server for cdatabase file", cmd_str)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() != filename:
raise Exception('Cdatabase file %s was not backup up to NetBackup server %s successfully' % (filename, netbackup_service_host))
@when('all backup files under "{dir}" for stored dump timestamp are removed')
def impl(context, dir):
if hasattr(context, 'backup_timestamp'):
backup_timestamp = context.backup_timestamp
else:
raise Exception('No dump timestamp was stored from gpcrondump')
dump_date = backup_timestamp[0:8]
if dir.strip():
dump_dir = dir.strip()
else:
dump_dir = master_data_dir
backup_files_dir = os.path.join(dump_dir, 'db_dumps', dump_date)
rm_cmd_str = "rm -rf %s/*" % backup_files_dir
rm_cmd = Command("Remove files from dump dir location", rm_cmd_str)
rm_cmd.run(validateAfter=True)
def verify_num_files_with_nbu(results, expected_num_files, timestamp):
num_files = results.stdout.strip()
if num_files != expected_num_files:
raise Exception('Expected "%s" files with timestamp key "%s" but found "%s"' % (expected_num_files, timestamp,num_files))
def verify_timestamps_on_master_with_nbu(timestamp, dump_type):
list_cmd = 'ls -l %s/db_dumps/%s/*%s* | wc -l' % (master_data_dir, timestamp[:8], timestamp)
cmd = Command('verify timestamps on master', list_cmd)
cmd.run(validateAfter=True)
expected_num_files = '8' if dump_type == 'incremental' else '6'
verify_num_files_with_nbu(cmd.get_results(), expected_num_files, timestamp)
def verify_timestamps_on_segments_with_nbu(timestamp):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segs = [segdb for segdb in gparray.getDbList() if segdb.isSegmentPrimary()]
for seg in primary_segs:
db_dumps_dir = os.path.join(seg.getSegmentDataDirectory(),
'db_dumps',
timestamp[:8])
list_cmd = 'ls -l %s/*%s* | wc -l' % (db_dumps_dir, timestamp)
cmd = Command('get list of dump files', list_cmd, ctxt=REMOTE, remoteHost=seg.getSegmentHostName())
cmd.run(validateAfter=True)
verify_num_files_with_nbu(cmd.get_results(), '1', timestamp)
@then('verify that "{dump_type}" dump files using netbackup have stored timestamp in their filename')
def impl(context, dump_type):
if dump_type.strip().lower() != 'full' and dump_type.strip().lower() != 'incremental':
raise Exception('Invalid dump type "%s"' % dump_type)
verify_timestamps_on_master_with_nbu(context.backup_timestamp, dump_type.strip().lower())
verify_timestamps_on_segments_with_nbu(context.backup_timestamp)
@given('all netbackup objects containing "{substr}" are deleted')
@when('all netbackup objects containing "{substr}" are deleted')
@then('all netbackup objects containing "{substr}" are deleted')
def impl(context, substr):
if hasattr(context, 'netbackup_service_host'):
netbackup_service_host = context.netbackup_service_host
del_cmd_str = "gp_bsa_delete_agent --netbackup-service-host=%s --netbackup-delete-objects=*%s*" % (netbackup_service_host, substr)
cmd = Command('Delete the list of objects matching regex on NetBackup server', del_cmd_str)
cmd.run(validateAfter=True)
| randomtask1155/gpdb | gpMgmt/bin/gppylib/test/behave/mgmt_utils/steps/netbackup_mgmt_utils.py | Python | apache-2.0 | 27,950 |
from io import StringIO
import json
import logging
import sys
import uuid
import csv
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
import atsd_client
from atsd_client.exceptions import SQLException
from atsd_client.services import SQLService, MetricsService
enabled = True
except ImportError:
enabled = False
types_map = {
'long': TYPE_INTEGER,
'bigint': TYPE_INTEGER,
'integer': TYPE_INTEGER,
'smallint': TYPE_INTEGER,
'float': TYPE_FLOAT,
'double': TYPE_FLOAT,
'decimal': TYPE_FLOAT,
'string': TYPE_STRING,
'date': TYPE_DATE,
'xsd:dateTimeStamp': TYPE_DATETIME
}
def resolve_redash_type(type_in_atsd):
"""
Retrieve corresponding redash type
:param type_in_atsd: `str`
:return: redash type constant
"""
if isinstance(type_in_atsd, dict):
type_in_redash = types_map.get(type_in_atsd['base'])
else:
type_in_redash = types_map.get(type_in_atsd)
return type_in_redash
def generate_rows_and_columns(csv_response):
"""
Prepare rows and columns in redash format from ATSD csv response
:param csv_response: `str`
:return: prepared rows and columns
"""
meta, data = csv_response.split('\n', 1)
meta = meta[1:]
meta_with_padding = meta + '=' * (4 - len(meta) % 4)
meta_decoded = meta_with_padding.decode('base64')
meta_json = json.loads(meta_decoded)
meta_columns = meta_json['tableSchema']['columns']
reader = csv.reader(data.splitlines())
next(reader)
columns = [{'friendly_name': i['titles'],
'type': resolve_redash_type(i['datatype']),
'name': i['name']}
for i in meta_columns]
column_names = [c['name'] for c in columns]
rows = [dict(zip(column_names, row)) for row in reader]
return columns, rows
class AxibaseTSD(BaseQueryRunner):
noop_query = "SELECT 1"
@classmethod
def enabled(cls):
return enabled
@classmethod
def name(cls):
return "Axibase Time Series Database"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'protocol': {
'type': 'string',
'title': 'Protocol',
'default': 'http'
},
'hostname': {
'type': 'string',
'title': 'Host',
'default': 'axibase_tsd_hostname'
},
'port': {
'type': 'number',
'title': 'Port',
'default': 8088
},
'username': {
'type': 'string'
},
'password': {
'type': 'string',
'title': 'Password'
},
'timeout': {
'type': 'number',
'default': 600,
'title': 'Connection Timeout'
},
'min_insert_date': {
'type': 'string',
'title': 'Metric Minimum Insert Date'
},
'expression': {
'type': 'string',
'title': 'Metric Filter'
},
'limit': {
'type': 'number',
'default': 5000,
'title': 'Metric Limit'
},
'trust_certificate': {
'type': 'boolean',
'title': 'Trust SSL Certificate'
}
},
'required': ['username', 'password', 'hostname', 'protocol', 'port'],
'secret': ['password']
}
def __init__(self, configuration):
super(AxibaseTSD, self).__init__(configuration)
self.url = '{0}://{1}:{2}'.format(self.configuration.get('protocol', 'http'),
self.configuration.get('hostname', 'localhost'),
self.configuration.get('port', 8088))
def run_query(self, query, user):
connection = atsd_client.connect_url(self.url,
self.configuration.get('username'),
self.configuration.get('password'),
verify=self.configuration.get('trust_certificate', False),
timeout=self.configuration.get('timeout', 600))
sql = SQLService(connection)
query_id = str(uuid.uuid4())
try:
logger.debug("SQL running query: %s", query)
data = sql.query_with_params(query, {'outputFormat': 'csv', 'metadataFormat': 'EMBED',
'queryId': query_id})
columns, rows = generate_rows_and_columns(data)
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except SQLException as e:
json_data = None
error = e.content
except (KeyboardInterrupt, InterruptException):
sql.cancel_query(query_id)
error = "Query cancelled by user."
json_data = None
except Exception:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
def get_schema(self, get_stats=False):
connection = atsd_client.connect_url(self.url,
self.configuration.get('username'),
self.configuration.get('password'),
verify=self.configuration.get('trust_certificate', False),
timeout=self.configuration.get('timeout', 600))
metrics = MetricsService(connection)
ml = metrics.list(expression=self.configuration.get('expression', None),
minInsertDate=self.configuration.get('min_insert_date', None),
limit=self.configuration.get('limit', 5000))
metrics_list = [i.name.encode('utf-8') for i in ml]
metrics_list.append('atsd_series')
schema = {}
default_columns = ['entity', 'datetime', 'time', 'metric', 'value', 'text',
'tags', 'entity.tags', 'metric.tags']
for table_name in metrics_list:
schema[table_name] = {'name': "'{}'".format(table_name),
'columns': default_columns}
values = schema.values()
return values
register(AxibaseTSD)
| hudl/redash | redash/query_runner/axibase_tsd.py | Python | bsd-2-clause | 6,833 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_points05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with point formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [45471616, 46804992]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'marker': {'type': 'automatic'},
'points': [{'fill': {'color': 'red'}}],
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
'marker': {'type': 'automatic'},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_chart_points05.py | Python | bsd-2-clause | 1,796 |
from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('Group', 'visible', models.BooleanField, initial=True)
]
| 1tush/reviewboard | reviewboard/reviews/evolutions/group_visible.py | Python | mit | 204 |
import time
import sys
from mitmproxy.script import concurrent
@concurrent
def request(flow):
time.sleep(0.1)
| Kriechi/mitmproxy | test/mitmproxy/data/addonscripts/concurrent_decorator.py | Python | mit | 116 |
import numpy
from pycbc.filter import matched_filter
snr = matched_filter(hp, zoom.to_pycbc(), psd=psd.to_pycbc(),
low_frequency_cutoff=15)
snrts = TimeSeries.from_pycbc(snr).abs() | gwpy/gwpy.github.io | docs/v0.5/examples/timeseries/pycbc-snr-5.py | Python | gpl-3.0 | 201 |
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py 2014/09/27 12:51:43 garyo"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
"""Add construction variables for an MS SDK to an Environment."""
mssdk_setup_env(env)
def exists(env):
return mssdk_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| stonekyx/binary | vendor/scons-local-2.3.4/SCons/Tool/mssdk.py | Python | gpl-3.0 | 1,804 |
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder",
"StreamReader", "StreamWriter",
"StreamReaderWriter", "StreamRecoder",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"encode", "decode", "iterencode", "iterdecode",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors", "backslashreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
"""Codec details when looking up the codec registry"""
# Private API to allow Python 3.4 to blacklist the known non-Unicode
# codecs in the standard library. A more general mechanism to
# reliably distinguish test encodings from other codecs will hopefully
# be defined for Python 3.5
#
# See http://bugs.python.org/issue19619
_is_text_encoding = True # Assume codecs are text encodings by default
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None,
*, _is_text_encoding=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
if _is_text_encoding is not None:
self._is_text_encoding = _is_text_encoding
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private code points U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamWriter for codecs which have to keep state in order to
make encoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamReader for codecs which have to keep state in order to
make decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Create an IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of decoded code points or bytes to
return. read() will never return more data than requested,
but it might return less, if there is not enough available.
size indicates the approximate maximum number of decoded
bytes or code points to read for decoding. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy, meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
elif size >= 0:
if len(self.charbuffer) >= size:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
if not data:
break
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as a list.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances translate data from one encoding to another.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the StreamRecoder is first decoded into an
intermediate format (depending on the "decode" codec) and then
written to the underlying stream using an instance of the provided
Writer class.
In the other direction, data is read from the underlying stream using
a Reader instance and then encoded and returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
data visible to .read() and .write()) while Reader and Writer
work on the backend (the data in stream).
You can use these objects to do transparent
transcodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode and decode must adhere to the Codec interface; Reader and
Writer must be factory functions or classes providing the
StreamReader and StreamWriter interfaces resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='r', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Underlying encoded files are always opened in binary mode.
The default file mode is 'r', meaning to open the file in read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Data written to the wrapped file is decoded according
to the given data_encoding and then encoded to the underlying
file using file_encoding. The intermediate data type
will usually be Unicode but depends on the specified codecs.
Bytes read from the file are decoded using file_encoding and then
passed back to the caller encoded using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
multiple character to \\u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
# In --disable-unicode builds, these error handler are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/codecs.py | Python | gpl-3.0 | 35,910 |
"""
The Python API layer of the country access settings. Essentially the middle tier of the project, responsible for all
business logic that is not directly tied to the data itself.
This API is exposed via the middleware(emabargo/middileware.py) layer but may be used directly in-process.
"""
import logging
import pygeoip
from django.core.cache import cache
from django.conf import settings
from rest_framework.response import Response
from rest_framework import status
from ipware.ip import get_ip
from student.auth import has_course_author_access
from .models import CountryAccessRule, RestrictedCourse
log = logging.getLogger(__name__)
def redirect_if_blocked(course_key, access_point='enrollment', **kwargs):
"""Redirect if the user does not have access to the course. In case of blocked if access_point
is not enrollment and course has enabled is_disabled_access_check then user can view that course.
Arguments:
course_key (CourseKey): Location of the course the user is trying to access.
Keyword Arguments:
Same as `check_course_access` and `message_url_path`
"""
if settings.FEATURES.get('EMBARGO'):
is_blocked = not check_course_access(course_key, **kwargs)
if is_blocked:
if access_point == "courseware":
if not RestrictedCourse.is_disabled_access_check(course_key):
return message_url_path(course_key, access_point)
else:
return message_url_path(course_key, access_point)
def check_course_access(course_key, user=None, ip_address=None, url=None):
"""
Check is the user with this ip_address has access to the given course
Arguments:
course_key (CourseKey): Location of the course the user is trying to access.
Keyword Arguments:
user (User): The user making the request. Can be None, in which case
the user's profile country will not be checked.
ip_address (str): The IP address of the request.
url (str): The URL the user is trying to access. Used in
log messages.
Returns:
Boolean: True if the user has access to the course; False otherwise
"""
# No-op if the country access feature is not enabled
if not settings.FEATURES.get('EMBARGO'):
return True
# First, check whether there are any restrictions on the course.
# If not, then we do not need to do any further checks
course_is_restricted = RestrictedCourse.is_restricted_course(course_key)
if not course_is_restricted:
return True
# Always give global and course staff access, regardless of embargo settings.
if user is not None and has_course_author_access(user, course_key):
return True
if ip_address is not None:
# Retrieve the country code from the IP address
# and check it against the allowed countries list for a course
user_country_from_ip = _country_code_from_ip(ip_address)
if not CountryAccessRule.check_country_access(course_key, user_country_from_ip):
log.info(
(
u"Blocking user %s from accessing course %s at %s "
u"because the user's IP address %s appears to be "
u"located in %s."
),
getattr(user, 'id', '<Not Authenticated>'),
course_key,
url,
ip_address,
user_country_from_ip
)
return False
if user is not None:
# Retrieve the country code from the user's profile
# and check it against the allowed countries list for a course.
user_country_from_profile = _get_user_country_from_profile(user)
if not CountryAccessRule.check_country_access(course_key, user_country_from_profile):
log.info(
(
u"Blocking user %s from accessing course %s at %s "
u"because the user's profile country is %s."
),
user.id, course_key, url, user_country_from_profile
)
return False
return True
def message_url_path(course_key, access_point):
"""Determine the URL path for the message explaining why the user was blocked.
This is configured per-course. See `RestrictedCourse` in the `embargo.models`
module for more details.
Arguments:
course_key (CourseKey): The location of the course.
access_point (str): How the user was trying to access the course.
Can be either "enrollment" or "courseware".
Returns:
unicode: The URL path to a page explaining why the user was blocked.
Raises:
InvalidAccessPoint: Raised if access_point is not a supported value.
"""
return RestrictedCourse.message_url_path(course_key, access_point)
def _get_user_country_from_profile(user):
"""
Check whether the user is embargoed based on the country code in the user's profile.
Args:
user (User): The user attempting to access courseware.
Returns:
user country from profile.
"""
cache_key = u'user.{user_id}.profile.country'.format(user_id=user.id)
profile_country = cache.get(cache_key)
if profile_country is None:
profile = getattr(user, 'profile', None)
if profile is not None and profile.country.code is not None:
profile_country = profile.country.code.upper()
else:
profile_country = ""
cache.set(cache_key, profile_country)
return profile_country
def _country_code_from_ip(ip_addr):
"""
Return the country code associated with an IP address.
Handles both IPv4 and IPv6 addresses.
Args:
ip_addr (str): The IP address to look up.
Returns:
str: A 2-letter country code.
"""
if ip_addr.find(':') >= 0:
return pygeoip.GeoIP(settings.GEOIPV6_PATH).country_code_by_addr(ip_addr)
else:
return pygeoip.GeoIP(settings.GEOIP_PATH).country_code_by_addr(ip_addr)
def get_embargo_response(request, course_id, user):
"""
Check whether any country access rules block the user from enrollment.
Args:
request (HttpRequest): The request object
course_id (str): The requested course ID
user (str): The current user object
Returns:
HttpResponse: Response of the embargo page if embargoed, None if not
"""
redirect_url = redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request), url=request.path)
if redirect_url:
return Response(
status=status.HTTP_403_FORBIDDEN,
data={
"message": (
u"Users from this location cannot access the course '{course_id}'."
).format(course_id=course_id),
"user_message_url": request.build_absolute_uri(redirect_url)
}
)
| synergeticsedx/deployment-wipro | openedx/core/djangoapps/embargo/api.py | Python | agpl-3.0 | 6,954 |
#!/usr/bin/env python
# Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
#
# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
#
# Note: the option to recover to a new host is not very good if we have a multi-home configuration
#
# Options removed when 4.0 gprecoverseg was implemented:
# --version
# -S "Primary segment dbid to force recovery": I think this is done now by bringing the primary down, waiting for
# failover, and then doing recover full
# -z "Primary segment data dir and host to force recovery" see removed -S option for comment
# -f : force Greenplum Database instance shutdown and restart
# -F (HAS BEEN CHANGED) -- used to mean "force recovery" and now means "full recovery)
# And a change to the -i input file: it now takes replicationPort in list of args (for failover target)
#
# import mainUtils FIRST to get python version check
# THIS IMPORT SHOULD COME FIRST
from gppylib.mainUtils import *
from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
import os, sys, getopt, socket, StringIO, signal
from gppylib import gparray, gplog, pgconf, userinput, utils
from gppylib.util import gp_utils
from gppylib.commands import base, gp, pg, unix
from gppylib.db import catalog, dbconn
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.operations.startSegments import *
from gppylib.operations.buildMirrorSegments import *
from gppylib.operations.rebalanceSegments import GpSegmentRebalanceOperation
from gppylib.programs import programIoUtils
from gppylib.programs.clsAddMirrors import validateFlexibleHeadersListAllFilespaces
from gppylib.system import configurationInterface as configInterface
from gppylib.system.environment import GpMasterEnvironment
from gppylib.testold.testUtils import *
from gppylib.parseutils import line_reader, parse_filespace_order, parse_gprecoverseg_line, \
canonicalize_address
from gppylib.utils import ParsedConfigFile, ParsedConfigFileRow, writeLinesToFile, \
normalizeAndValidateInputPath, TableLogger
from gppylib.gphostcache import GpInterfaceToHostNameCache
from gppylib.operations.utils import ParallelOperation
from gppylib.operations.package import SyncPackages
import gppylib.commands.gp
logger = gplog.get_default_logger()
class PortAssigner:
"""
Used to assign new ports to segments on a host
Note that this could be improved so that we re-use ports for segments that are being recovered but this
does not seem necessary.
"""
MAX_PORT_EXCLUSIVE=65536
def __init__(self, gpArray):
#
# determine port information for recovering to a new host --
# we need to know the ports that are in use and the valid range of ports
#
segments = gpArray.getDbList()
ports = [seg.getSegmentPort() for seg in segments if seg.isSegmentQE()]
if len(ports) > 0:
self.__minPort = min(ports)
else:
raise Exception("No segment ports found in array.")
self.__usedPortsByHostName = {}
byHost = GpArray.getSegmentsByHostName(segments)
for hostName, segments in byHost.iteritems():
usedPorts = self.__usedPortsByHostName[hostName] = {}
for seg in segments:
usedPorts[seg.getSegmentPort()] = True
def findAndReservePort(self, hostName, address):
"""
Find an unused port of the given type (normal postmaster or replication port)
When found, add an entry: usedPorts[port] = True and return the port found
Otherwise raise an exception labeled with the given address
"""
if hostName not in self.__usedPortsByHostName:
self.__usedPortsByHostName[hostName] = {}
usedPorts = self.__usedPortsByHostName[hostName]
minPort = self.__minPort
for port in range(minPort, PortAssigner.MAX_PORT_EXCLUSIVE):
if port not in usedPorts:
usedPorts[port] = True
return port
raise Exception("Unable to assign port on %s" % address)
#-------------------------------------------------------------------------
class GpRecoverSegmentProgram:
#
# Constructor:
#
# @param options the options as returned by the options parser
#
def __init__(self, options):
self.__options = options
self.__pool = None
self.logger = logger
def outputToFile(self, mirrorBuilder, gpArray, fileName):
lines = []
#
# first line is always the filespace order
#
filespaceArr = [fs for fs in gpArray.getFilespaces(False)]
lines.append("filespaceOrder=" + (":".join([fs.getName() for fs in filespaceArr])))
# now one for each failure
for mirror in mirrorBuilder.getMirrorsToBuild():
str = ""
seg = mirror.getFailedSegment()
addr = canonicalize_address( seg.getSegmentAddress() )
str += ('%s:%d:%s' % ( addr, seg.getSegmentPort(), canonicalize_address(seg.getSegmentDataDirectory())))
seg = mirror.getFailoverSegment()
if seg is not None:
#
# build up :path1:path2 for the mirror segment's filespace paths
#
segFilespaces = seg.getSegmentFilespaces()
filespaceValues = []
for fs in filespaceArr :
path = segFilespaces.get(fs.getOid())
assert path is not None # checking consistency should have been done earlier, but doublecheck here
filespaceValues.append(":" + canonicalize_address(path))
str += ' '
addr = canonicalize_address( seg.getSegmentAddress() )
str += ('%s:%d:%d:%s%s' % (addr, seg.getSegmentPort(), seg.getSegmentReplicationPort(), seg.getSegmentDataDirectory(),
"".join(filespaceValues)))
lines.append(str)
writeLinesToFile(fileName, lines)
def getRecoveryActionsFromConfigFile(self, gpArray):
"""
getRecoveryActionsFromConfigFile
returns a GpMirrorListToBuild object
"""
# create fileData object from config file
#
filename = self.__options.recoveryConfigFile
fslist = None
rows = []
with open(filename) as f:
for lineno, line in line_reader(f):
if fslist is None:
fslist = parse_filespace_order(filename, lineno, line)
else:
fixed, flexible = parse_gprecoverseg_line(filename, lineno, line, fslist)
rows.append( ParsedConfigFileRow(fixed, flexible, line) )
fileData = ParsedConfigFile(fslist, rows)
# validate fileData
#
validateFlexibleHeadersListAllFilespaces("Segment recovery config", gpArray, fileData)
filespaceNameToFilespace = dict([ (fs.getName(), fs) for fs in gpArray.getFilespaces(False)])
allAddresses = [row.getFixedValuesMap()["newAddress"] for row in fileData.getRows()
if "newAddress" in row.getFixedValuesMap()]
allNoneArr = [None for a in allAddresses]
interfaceLookup = GpInterfaceToHostNameCache(self.__pool, allAddresses, allNoneArr)
failedSegments = []
failoverSegments = []
for row in fileData.getRows():
fixedValues = row.getFixedValuesMap()
flexibleValues = row.getFlexibleValuesMap()
# find the failed segment
failedAddress = fixedValues['failedAddress']
failedPort = fixedValues['failedPort']
failedDataDirectory = normalizeAndValidateInputPath( fixedValues['failedDataDirectory'],
"config file", row.getLine())
failedSegment = None
for segment in gpArray.getDbList():
if segment.getSegmentAddress() == failedAddress and \
str(segment.getSegmentPort()) == failedPort and \
segment.getSegmentDataDirectory() == failedDataDirectory:
if failedSegment is not None:
#
# this could be an assertion -- configuration should not allow multiple entries!
#
raise Exception(("A segment to recover was found twice in configuration. " \
"This segment is described by address:port:directory '%s:%s:%s' on the input line: %s") %
(failedAddress, failedPort, failedDataDirectory, row.getLine()))
failedSegment = segment
if failedSegment is None:
raise Exception("A segment to recover was not found in configuration. " \
"This segment is described by address:port:directory '%s:%s:%s' on the input line: %s" %
(failedAddress, failedPort, failedDataDirectory, row.getLine()))
failoverSegment = None
if "newAddress" in fixedValues:
"""
When the second set was passed, the caller is going to tell us to where we need to failover, so
build a failover segment
"""
# these two lines make it so that failoverSegment points to the object that is registered in gparray
failoverSegment = failedSegment
failedSegment = failoverSegment.copy()
address = fixedValues["newAddress"]
try:
port = int(fixedValues["newPort"])
replicationPort = int(fixedValues["newReplicationPort"])
except ValueError:
raise Exception( 'Config file format error, invalid number value in line: %s' % (row.getLine()))
dataDirectory = normalizeAndValidateInputPath(fixedValues["newDataDirectory"], "config file", row.getLine())
hostName = interfaceLookup.getHostName(address)
if hostName is None:
raise Exception( 'Unable to find host name for address %s from line:%s' % (address, row.getLine()))
filespaceOidToPathMap = {}
for fsName, path in flexibleValues.iteritems():
path = normalizeAndValidateInputPath(path, "config file", row.getLine())
filespaceOidToPathMap[filespaceNameToFilespace[fsName].getOid()] = path
# now update values in failover segment
failoverSegment.setSegmentAddress( address )
failoverSegment.setSegmentHostName( hostName )
failoverSegment.setSegmentPort( port )
failoverSegment.setSegmentReplicationPort( replicationPort )
failoverSegment.setSegmentDataDirectory( dataDirectory )
for fsOid, path in filespaceOidToPathMap.iteritems():
failoverSegment.getSegmentFilespaces()[fsOid] = path
failoverSegment.getSegmentFilespaces()[gparray.SYSTEM_FILESPACE] = dataDirectory
# this must come AFTER the if check above because failedSegment can be adjusted to
# point to a different object
failedSegments.append(failedSegment)
failoverSegments.append(failoverSegment)
peersForFailedSegments = self.findAndValidatePeersForFailedSegments(gpArray, failedSegments)
segs = []
for i in range(len(failedSegments)):
segs.append( GpMirrorToBuild(failedSegments[i], peersForFailedSegments[i], failoverSegments[i], \
self.__options.forceFullResynchronization))
return GpMirrorListToBuild(segs, self.__pool, self.__options.quiet, self.__options.parallelDegree)
def findAndValidatePeersForFailedSegments(self, gpArray, failedSegments):
"""
findAndValidatePeersForFailedSegments will returns random segments for failed segments.
"""
segs = [ seg for seg in gpArray.get_valid_segdbs() if not seg.isSegmentDown()]
if len(segs) <= 0:
raise Exception("No available segment in the cluster.")
peersForFailedSegments = [ segs[0] for seg in failedSegments]
for i in range(len(failedSegments)):
peer = peersForFailedSegments[i]
if peer is None:
raise Exception("No peer found for dbid %s" % failedSegments[i].getSegmentDbId())
elif peer.isSegmentDown():
raise Exception("Both segments for content %s are down; Try restarting Greenplum DB and running %s again." %
(peer.getSegmentContentId(), getProgramName()))
return peersForFailedSegments
def __outputSpareDataDirectoryFile( self, gpEnv, gpArray, outputFile):
lines = []
for fs in gpArray.getFilespaces():
if gpArray.isFileSpaceShared(fs.getOid()):
lines.append(fs.getName() + "=sharedFilespaceCannotMove")
else:
lines.append(fs.getName() + "=enterFilespacePath")
lines.sort()
utils.writeLinesToFile(outputFile, lines)
self.logger.info("Wrote sample configuration file %s" % outputFile)
self.logger.info("MODIFY IT and then run with gprecoverseg -s %s" % outputFile)
def __readSpareDirectoryMap(self, gpArray, spareDataDirectoryFile):
"""
Read filespaceName=path configuration from spareDataDirectoryFile
File format should be in sync with format printed by __outputSpareDataDirectoryFile
@return a dictionary mapping filespace oid to path
"""
filespaceNameToFilespace = dict([ (fs.getName(), fs) for fs in gpArray.getFilespaces()])
specifiedFilespaceNames = {}
fsOidToPath = {}
for line in utils.readAllLinesFromFile(spareDataDirectoryFile, skipEmptyLines=True, stripLines=True):
arr = line.split("=")
if len(arr) != 2:
raise Exception("Invalid line in spare directory configuration file: %s" % line)
fsName = arr[0]
path = arr[1]
if fsName in specifiedFilespaceNames:
raise Exception("Filespace %s has multiple entries in spare directory configuration file." % fsName )
specifiedFilespaceNames[fsName] = True
if fsName not in filespaceNameToFilespace:
raise Exception("Invalid filespace %s in spare directory configuration file." % fsName )
oid = filespaceNameToFilespace[fsName].getOid()
if gpArray.isFileSpaceShared(oid):
path = None
else:
path = normalizeAndValidateInputPath(path, "config file" )
fsOidToPath[oid] = path
if len(fsOidToPath) != len(filespaceNameToFilespace):
raise Exception("Filespace configuration file only lists %s of needed %s filespace directories. "
"Use -S option to create sample input file." %
(len(fsOidToPath), len(filespaceNameToFilespace)))
return fsOidToPath
def __applySpareDirectoryMapToSegment( self, gpEnv, gpArray, spareDirectoryMap, segment):
gpPrefix = gp_utils.get_gp_prefix(gpEnv.getMasterDataDir())
if not gpPrefix:
gpPrefix = 'gp'
fsMap = segment.getSegmentFilespaces()
for oid, path in spareDirectoryMap.iteritems():
# Shared storage can not be relocated
if (gpArray.isFileSpaceShared(oid)):
continue
newPath = utils.createSegmentSpecificPath( path, gpPrefix, segment)
fsMap[oid] = newPath
if oid == gparray.SYSTEM_FILESPACE:
segment.setSegmentDataDirectory( newPath )
def getRecoveryActionsFromConfiguration(self, gpEnv, gpArray):
"""
getRecoveryActionsFromConfiguration
returns a GpMirrorListToBuild object
"""
segments = gpArray.getSegDbList()
failedSegments = [ seg for seg in segments if seg.isSegmentDown() ]
peersForFailedSegments = self.findAndValidatePeersForFailedSegments(gpArray, failedSegments)
# Dictionaries used for building mapping to new hosts
recoverAddressMap = {}
recoverHostMap = {}
interfaceHostnameWarnings = []
# Check if the array is a "standard" array
(isStandardArray, _ignore) = gpArray.isStandardArray()
recoverHostIdx = 0
if self.__options.newRecoverHosts and len(self.__options.newRecoverHosts) > 0:
for seg in failedSegments:
segAddress = seg.getSegmentAddress()
segHostname = seg.getSegmentHostName()
# Haven't seen this hostname before so we put it on a new host
if not recoverHostMap.has_key(segHostname):
try:
recoverHostMap[segHostname] = self.__options.newRecoverHosts[recoverHostIdx]
except:
# If we get here, not enough hosts were specified in the -p option. Need 1 new host
# per 1 failed host.
raise Exception('Not enough new recovery hosts given for recovery.')
recoverHostIdx += 1
if isStandardArray:
# We have a standard array configuration, so we'll try to use the same
# interface naming convention. If this doesn't work, we'll correct it
# below on name lookup
segInterface = segAddress[segAddress.rfind('-'):]
destAddress = recoverHostMap[segHostname] + segInterface
destHostname = recoverHostMap[segHostname]
else:
# Non standard configuration so we won't make assumptions on
# naming. Instead we'll use the hostname passed in for both
# hostname and address and flag for warning later.
destAddress = recoverHostMap[segHostname]
destHostname = recoverHostMap[segHostname]
# Save off the new host/address for this address.
recoverAddressMap[segAddress] = (destHostname, destAddress)
# Now that we've generated the mapping, look up all the addresses to make
# sure they are resolvable.
interfaces = [address for (_ignore, address) in recoverAddressMap.values()]
interfaceLookup = GpInterfaceToHostNameCache(self.__pool, interfaces, [None] * len(interfaces))
for key in recoverAddressMap.keys():
(newHostname, newAddress) = recoverAddressMap[key]
try:
addressHostnameLookup = interfaceLookup.getHostName(newAddress)
# Lookup failed so use hostname passed in for everything.
if addressHostnameLookup is None:
interfaceHostnameWarnings.append("Lookup of %s failed. Using %s for both hostname and address." % (newAddress, newHostname))
newAddress = newHostname
except:
# Catch all exceptions. We will use hostname instead of address
# that we generated.
interfaceHostnameWarnings.append("Lookup of %s failed. Using %s for both hostname and address." % (newAddress, newHostname))
newAddress = newHostname
# if we've updated the address to use the hostname because of lookup failure
# make sure the hostname is resolvable and up
if newHostname == newAddress:
try:
unix.Ping.local("ping new hostname", newHostname)
except:
raise Exception("Ping of host %s failed." % newHostname)
# Save changes in map
recoverAddressMap[key] = (newHostname, newAddress)
if len(self.__options.newRecoverHosts) != recoverHostIdx:
interfaceHostnameWarnings.append("The following recovery hosts were not needed:")
for h in self.__options.newRecoverHosts[recoverHostIdx:]:
interfaceHostnameWarnings.append("\t%s" % h)
spareDirectoryMap = None
if self.__options.spareDataDirectoryFile is not None:
spareDirectoryMap = self.__readSpareDirectoryMap(gpArray, self.__options.spareDataDirectoryFile)
portAssigner = PortAssigner(gpArray)
forceFull = self.__options.forceFullResynchronization
segs = []
for i in range(len(failedSegments)):
failoverSegment = None
failedSegment = failedSegments[i]
liveSegment = peersForFailedSegments[i]
if self.__options.newRecoverHosts and len(self.__options.newRecoverHosts) > 0:
(newRecoverHost, newRecoverAddress) = recoverAddressMap[failedSegment.getSegmentAddress()]
# these two lines make it so that failoverSegment points to the object that is registered in gparray
failoverSegment = failedSegment
failedSegment = failoverSegment.copy()
failoverSegment.setSegmentHostName( newRecoverHost )
failoverSegment.setSegmentAddress( newRecoverAddress )
port = portAssigner.findAndReservePort(newRecoverHost, newRecoverAddress )
failoverSegment.setSegmentPort( port )
if spareDirectoryMap is not None:
#
# these two lines make it so that failoverSegment points to the object that is registered in gparray
failoverSegment = failedSegment
failedSegment = failoverSegment.copy()
self.__applySpareDirectoryMapToSegment( gpEnv, gpArray, spareDirectoryMap, failoverSegment)
# we're failing over to different location on same host so we don't need to assign new ports
segs.append( GpMirrorToBuild(failedSegment, liveSegment, failoverSegment, forceFull ))
return GpMirrorListToBuild(segs, self.__pool, self.__options.quiet, self.__options.parallelDegree, interfaceHostnameWarnings)
def GPSQLFailback(self, gpArray, gpEnv):
if self.__options.outputSpareDataDirectoryFile is not None:
self.__outputSpareDataDirectoryFile(gpEnv, gpArray, self.__options.outputSpareDataDirectoryFile)
return 0
if self.__options.newRecoverHosts is not None:
try:
uniqueHosts = []
[uniqueHosts.append(h.strip()) for h in self.__options.newRecoverHosts.split(',') \
if h.strip() not in uniqueHosts ]
self.__options.newRecoverHosts = uniqueHosts
except Exception, ex:
raise ProgramArgumentValidationException(\
"Invalid value for recover hosts: %s" % ex)
# GPSQL can not fail to the host already exists in cluster.
segmentsHosts = []
[ segmentsHosts.append(seg.getSegmentHostName()) for seg in gpArray.getSegDbList() if seg.isSegmentQE() ]
for seg in segmentsHosts:
if seg in uniqueHosts:
raise Exception("You can not choose the host \"%s\" which has segments running for recover host." % seg)
# If it's a rebalance operation
if self.__options.rebalanceSegments:
raise Exception("GPSQL does not support rebalance.")
# retain list of hosts that were existing in the system prior to getRecoverActions...
# this will be needed for later calculations that determine whether
# new hosts were added into the system
existing_hosts = set(gpArray.getHostList())
# figure out what needs to be done
mirrorBuilder = self.getRecoveryActionsBasedOnOptions(gpEnv, gpArray)
if self.__options.outputSampleConfigFile is not None:
# just output config file and done
self.outputToFile(mirrorBuilder, gpArray, self.__options.outputSampleConfigFile)
self.logger.info('Configuration file output to %s successfully.' % self.__options.outputSampleConfigFile)
elif len(mirrorBuilder.getMirrorsToBuild()) == 0:
self.logger.info('No segments to recover')
else:
mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)
self.displayRecovery(mirrorBuilder, gpArray)
self.__displayRecoveryWarnings(mirrorBuilder)
if self.__options.interactive:
if not userinput.ask_yesno(None, "\nContinue with segment recovery procedure", 'N'):
raise UserAbortedException()
# sync packages
current_hosts = set(gpArray.getHostList())
new_hosts = current_hosts - existing_hosts
if new_hosts:
self.syncPackages(new_hosts)
mirrorBuilder.buildMirrors("recover", gpEnv, gpArray )
return 1
confProvider.sendPgElogFromMaster("Recovery of %d segment(s) has been started." % \
len(mirrorBuilder.getMirrorsToBuild()), True)
self.logger.info("******************************************************************")
self.logger.info("Updating segments for resynchronization is completed.")
self.logger.info("For segments updated successfully, resynchronization will continue in the background.")
self.logger.info("")
self.logger.info("Use gpstate -s to check the resynchronization progress.")
self.logger.info("******************************************************************")
return 0 # success -- exit code 0!
# San-failback is handled separately from the Filerep-recovery operations.
#
# I need to walk through results returned by shell commands digging for information
# this isn't as easy as I'd like.
def SanFailback(self, array_config, gpEnv):
# Get the configuration information maps.
(san_mounts, san_mount_by_dbid) = array_config.getSanConfigMaps()
# 1 Get the failed segments
bad_segs = {}
for dbid, v in san_mount_by_dbid.iteritems():
(status, content, mountlist) = v
if status == 'd':
self.logger.info('Bad segment with dbid %d' % dbid)
bad_segs[dbid] = (status, content, mountlist)
# 2 Get the failed mountpoints.
bad_mounts = {}
for mount_id, v in san_mounts.iteritems():
if v['active'] == 'm':
self.logger.info('Bad mountpoint with id %d' % mount_id)
bad_mounts[mount_id] = v
# 3 Verify that the required hosts are back up (this may reduce the number of recoverable segments)
recoverable_mps = {}
for mount_id, v in bad_mounts.iteritems():
try:
unix.Echo.remote('check host', 'Success', v['primaryhost'])
recoverable_mps[mount_id] = v
except:
# Host not available, not added to recoverable_mps. We'll ignore
# because there may be others we can recover
pass
# 4
# From the recoverable mountpoints, we should now be able to identify the
# mountpoints required to recover for the segments. A segment is recoverable
# if all of its mountpoints are recoverable.
recoverable_segs = {}
for dbid, v in bad_segs.iteritems():
(status, content, mountlist) = v
recoverable = True
for mount_id in mountlist:
if not recoverable_mps.has_key(mount_id):
recoverable = False
break
if recoverable:
recoverable_segs[dbid] = v
else:
self.logger.warning('Unrecoverable segment dbid %d' % (dbid))
if len(recoverable_segs) == 0:
raise Exception("Found no recoverable segments.")
# 4 Stop GPDB.
e = os.system('gpstop -aq -d %s' % (os.environ.get('MASTER_DATA_DIRECTORY')))
ok = not e
if not ok:
self.logger.error('Failed to shutdown Greenplum Database: segment recovery cannot proceed.')
raise Exception("Failed to shutdown GPDB. Segment recovery failed.")
else:
self.logger.info('Successfully shutdown the Greenplum Database')
# 5 Move mountpoints
# For each recoverable seg, walk its mountlist.
# 5a
# unmount on failover host.
# 5b
# reconnect to primary.
# 5c
# mount on primary.
mount_done = {}
for dbid, v in recoverable_segs.iteritems():
(status, content, mountlist) = v
for mount_id in mountlist:
if mount_done.has_key(mount_id):
continue # already moved this
if self.SanFailback_mountpoint(mount_id, recoverable_mps[mount_id]) == 0:
# TODO: some kind of error handling here ??
mount_done[mount_id] = True
else:
mount_done[mount_id] = False
self.logger.debug('Completed mount-recovery:')
for mount_id, v in mount_done.iteritems():
if v:
self.logger.debug('mount-id %d ---> TRUE' % mount_id)
else:
self.logger.debug('mount-id %d ---> FALSE' % mount_id)
# N - 3
# Start GPDB in admin-mode
os.putenv('GPSTART_INTERNAL_MASTER_ONLY', '1')
e = os.system('gpstart -m -d %s' % (os.environ.get('MASTER_DATA_DIRECTORY')))
ok = not e
if not ok:
self.logger.error('Failed to bring Greenplum Database up in management mode: segment recovery failed')
raise Exception("Failed to start GPDB in management mode.")
else:
self.logger.info('Greenplum Database restarted for configuration update')
# N - 2
# Update configuration
# Open a connection to the DB.
conn = None
try:
db_url = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1')
conn = dbconn.connect(db_url, utility=True)
dbconn.execSQL(conn, "BEGIN")
self.logger.debug('Starting Transaction')
# Update gp_san_configuration
for mount_id, v in mount_done.iteritems():
self.logger.debug('Checking Mount id %d' % mount_id)
if v:
sql = 'UPDATE gp_san_configuration SET active_host=\'p\' WHERE mountid=%d' % mount_id
self.logger.debug('Issuing SQL [%s]' % sql)
dbconn.executeUpdateOrInsert(conn, sql, 1)
history_message = "GPRECOVERSEG: san-mount-id %d set active_host to primary" % (mount_id)
sql = 'INSERT INTO gp_configuration_history values (now(), -1, \'%s\')' % history_message
self.logger.debug('Issuing SQL [%s]' % sql)
dbconn.executeUpdateOrInsert(conn, sql, 1)
# Update gp_segment_configuration
for dbid, v in recoverable_segs.iteritems():
(status, content, mountlist) = v
self.logger.debug('Checking dbid id %d' % dbid)
all_mountpoints = True
for mount_id, v in mount_done.iteritems():
self.logger.debug('Scanning mountid %d in dbid id %d' % (mount_id, dbid))
if not v:
self.logger.debug('Mountid %d --> False' % mount_id)
all_mountpoints = False
else:
self.logger.debug('Mountid %d --> True' % mount_id)
if all_mountpoints:
sql = 'UPDATE gp_segment_configuration SET status = \'u\' where dbid = %d' % dbid
self.logger.debug('Issuing SQL [%s]' % sql)
dbconn.executeUpdateOrInsert(conn, sql, 1)
sql = 'UPDATE gp_segment_configuration SET role = preferred_role where content = %d' % content
self.logger.debug('Issuing SQL [%s]' % sql)
dbconn.executeUpdateOrInsert(conn, sql, 2)
history_message = "GPRECOVERSEG: content %d, dbid %d moved to primary host" % (content, dbid)
sql = 'INSERT INTO gp_configuration_history values (now(), %d, \'%s\')' % (dbid, history_message)
self.logger.debug('Issuing SQL [%s]' % sql)
dbconn.executeUpdateOrInsert(conn, sql, 1)
else:
self.logger.info('Failed to recover sufficient mountpoints for dbid %d' % dbid)
self.logger.debug('Committing our updates.')
dbconn.execSQL(conn, "COMMIT")
finally:
if conn:
conn.close()
# N - 1
# Stop GPDB-admin-mode
e = os.system('gpstop -m -d %s' % (os.environ.get('MASTER_DATA_DIRECTORY')))
ok = not e
if not ok:
self.logger.error('Failed to stop Greenplum Database up in management mode: segment recovery failed')
raise Exception("Failed to stop GPDB, from management mode.")
else:
self.logger.info('Greenplum Database stopped, preparing for full restart.')
# N Start GPDB
e = os.system('gpstart -aq -d %s' % (os.environ.get('MASTER_DATA_DIRECTORY')))
ok = not e
if not ok:
self.logger.error('Failed to restart Greenplum Database: segment recovery failed')
raise Exception("Failed to restart GPDB.")
else:
self.logger.info('Successfully restarted the Greenplum Database')
configInterface.getConfigurationProvider().sendPgElogFromMaster( "SAN recovery has completed.", True)
return 0
def SanFailback_mountpoint(self, mp_id, mp_dict):
active = mp_dict['active']
type = mp_dict['type']
if active == 'm':
oldhost = mp_dict['primaryhost']
old_mp = mp_dict['primarymountpoint']
newhost = mp_dict['mirrorhost']
new_mp = mp_dict['mirrormountpoint']
else:
# failback unnecessary ?
self.logger.info('Not failback required for mount id %d primary is active!' % mp_id)
return 0
# RUN GP_MOUNT_AGENT HERE ??
command = 'gp_mount_agent --agent -u -t %c -a %c -p %s -d %s -m %s -q %s -e %s -n %s' % (mp_dict['type'], mp_dict['active'],
mp_dict['primaryhost'], mp_dict['primarydevice'], mp_dict['primarymountpoint'],
mp_dict['mirrorhost'], mp_dict['mirrordevice'], mp_dict['mirrormountpoint'])
self.logger.debug('gp_mount_agent command is \'%s\'' % command)
e = os.system(command)
ok = not e
if not ok:
self.logger.error('gp_mount_agent: failed to relocate mount point %d' % mp_id)
raise Exception("Failed to relocate mountpoint for mount id %d" % mp_id)
return 0
def getRecoveryActionsBasedOnOptions(self, gpEnv, gpArray):
if self.__options.rebalanceSegments:
return GpSegmentRebalanceOperation(gpEnv, gpArray)
elif self.__options.recoveryConfigFile is not None:
return self.getRecoveryActionsFromConfigFile(gpArray)
else:
return self.getRecoveryActionsFromConfiguration(gpEnv, gpArray)
def syncPackages(self, new_hosts):
# The design decision here is to squash any exceptions resulting from the
# synchronization of packages. We should *not* disturb the user's attempts to recover.
try:
logger.info('Syncing Greenplum Database extensions')
operations = [ SyncPackages(host) for host in new_hosts ]
ParallelOperation(operations, self.__options.parallelDegree).run()
# introspect outcomes
for operation in operations:
operation.get_ret()
except Exception, e:
logger.exception('Syncing of Greenplum Database extensions has failed.')
logger.warning('Please run gppkg --clean after successful segment recovery.')
def displayRecovery(self, mirrorBuilder, gpArray):
self.logger.info('Greenplum instance recovery parameters')
self.logger.info('---------------------------------------------------------')
if self.__options.recoveryConfigFile:
self.logger.info('Recovery from configuration -i option supplied')
elif self.__options.newRecoverHosts is not None:
self.logger.info('Recovery type = Pool Host')
for h in self.__options.newRecoverHosts:
self.logger.info('Pool host for recovery = %s' % h)
type_text = 'Pool '
elif self.__options.spareDataDirectoryFile is not None:
self.logger.info('Recovery type = Pool Directory')
self.logger.info('Mirror pool directory file = %s' % self.__options.spareDataDirectoryFile)
type_text = 'Pool dir'
elif self.__options.rebalanceSegments:
self.logger.info('Recovery type = Rebalance')
type_text = 'Rebalance segments'
else:
self.logger.info('Recovery type = Standard')
type_text = 'Failed '
if self.__options.rebalanceSegments:
i = 1
total = len(gpArray.get_unbalanced_segdbs())
for toRebalance in gpArray.get_unbalanced_segdbs():
tabLog = TableLogger()
self.logger.info('---------------------------------------------------------')
self.logger.info('Unbalanced segment %d of %d' % (i, total))
self.logger.info('---------------------------------------------------------')
programIoUtils.appendSegmentInfoForOutput("Unbalanced", gpArray, toRebalance, tabLog)
tabLog.info(["Balanced role", "= Primary" if toRebalance.preferred_role == 'p' else "= Mirror"])
tabLog.info(["Current role", "= Primary" if toRebalance.role == 'p' else "= Mirror"])
tabLog.outputTable()
i+=1
else:
count = 0
# self.logger.info('Recovery parallel batch value = %d' % opt['-B'])
i = 0
total = len(mirrorBuilder.getMirrorsToBuild())
for toRecover in mirrorBuilder.getMirrorsToBuild():
self.logger.info('---------------------------------------------------------')
self.logger.info('Recovery %d of %d' % (i+1, total))
self.logger.info('---------------------------------------------------------')
tabLog = TableLogger()
# syncMode = "Full" if toRecover.isFullSynchronization() else "Incremental"
# tabLog.info(["Synchronization mode", "= " + syncMode])
programIoUtils.appendSegmentInfoForOutput("Failed", gpArray, toRecover.getFailedSegment(), tabLog)
# programIoUtils.appendSegmentInfoForOutput("Recovery Source", gpArray, toRecover.getLiveSegment(), tabLog)
if toRecover.getFailoverSegment() is not None:
programIoUtils.appendSegmentInfoForOutput("Recovery Target", gpArray, toRecover.getFailoverSegment(), tabLog)
else:
tabLog.info(["Recovery Target", "= in-place"])
tabLog.outputTable()
i = i + 1
self.logger.info('---------------------------------------------------------')
def __getSimpleSegmentLabel(self, seg):
addr = canonicalize_address( seg.getSegmentAddress() )
return "%s:%s" % ( addr, seg.getSegmentDataDirectory())
def __displayRecoveryWarnings(self, mirrorBuilder):
for warning in self._getRecoveryWarnings(mirrorBuilder):
self.logger.warn(warning)
def _getRecoveryWarnings(self, mirrorBuilder):
"""
return an array of string warnings regarding the recovery
"""
res = []
for toRecover in mirrorBuilder.getMirrorsToBuild():
if toRecover.getFailoverSegment() is not None:
#
# user specified a failover location -- warn if it's the same host as its primary
#
src = toRecover.getLiveSegment()
dest = toRecover.getFailoverSegment()
if src.getSegmentHostName() == dest.getSegmentHostName():
res.append("Segment is being recovered to the same host as its primary: "
"primary %s failover target: %s"
% (self.__getSimpleSegmentLabel(src), self.__getSimpleSegmentLabel(dest)))
for warning in mirrorBuilder.getAdditionalWarnings():
res.append(warning)
return res
def run(self):
if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
raise ProgramArgumentValidationException("Invalid parallelDegree provided with -B argument: %d" % self.__options.parallelDegree)
self.__pool = base.WorkerPool(self.__options.parallelDegree)
gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True)
# verify "where to recover" options
optionCnt = 0
if self.__options.newRecoverHosts is not None:
optionCnt += 1
if self.__options.spareDataDirectoryFile is not None:
optionCnt += 1
if self.__options.recoveryConfigFile is not None:
optionCnt += 1
if self.__options.outputSpareDataDirectoryFile is not None:
optionCnt += 1
if self.__options.rebalanceSegments:
optionCnt += 1
if optionCnt > 1:
raise ProgramArgumentValidationException(\
"Only one of -i, -p, -s, -r, and -S may be specified")
faultProberInterface.getFaultProber().initializeProber(gpEnv.getMasterPort())
confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getMasterPort())
gpArray = confProvider.loadSystemConfig(useUtilityMode=False)
# Make sure gpArray and segments are in agreement on current state of system.
segmentList = gpArray.getSegDbList()
getVersionCmds = {}
for seg in segmentList:
if seg.isSegmentQD() == True:
continue
if seg.isSegmentModeInChangeLogging() == False:
continue
cmd = gp.SendFilerepTransitionStatusMessage( name = "Get segment status information"
, msg = gp.SEGMENT_STATUS_GET_STATUS
, dataDir = seg.getSegmentDataDirectory()
, port = seg.getSegmentPort()
, ctxt = gp.REMOTE
, remoteHost = seg.getSegmentHostName()
)
getVersionCmds[seg.getSegmentDbId()] = cmd
self.__pool.addCommand(cmd)
self.__pool.join()
# We can not check to see if the command was successful or not, because gp_primarymirror always returns a non-zero result.
# That is just the way gp_primarymirror was designed.
dbsMap = gpArray.getSegDbMap()
for dbid in getVersionCmds:
cmd = getVersionCmds[dbid]
mode = None
segmentState = None
dataState = None
try:
lines = str(cmd.get_results().stderr).split("\n")
mode = lines[0].split(": ")[1].strip()
segmentState = lines[1].split(": ")[1].strip()
dataState = lines[2].split(": ")[1].strip()
except Exception, e:
self.logger.warning("Problem getting Segment state dbid = %s, results = %s." % (str(dbid), str(cmd.get_results().stderr)))
continue
db = dbsMap[dbid]
if gparray.ROLE_TO_MODE_MAP[db.getSegmentRole()] != mode:
raise Exception("Inconsistency in catalog and segment Role/Mode. Catalog Role = %s. Segment Mode = %s." % (db.getSegmentRole(), mode))
if gparray.MODE_TO_DATA_STATE_MAP[db.getSegmentMode()] != dataState:
raise Exception("Inconsistency in catalog and segment Mode/DataState. Catalog Mode = %s. Segment DataState = %s." % (db.getSegmentMode(), dataState))
if segmentState != gparray.SEGMENT_STATE_READY and segmentState != gparray.SEGMENT_STATE_CHANGE_TRACKING_DISABLED:
if segmentState == gparray.SEGMENT_STATE_INITIALIZATION or segmentState == gparray.SEGMENT_STATE_IN_CHANGE_TRACKING_TRANSITION:
raise Exception("Segment is not ready for recovery dbid = %s, segmentState = %s. Retry recovery in a few moments" % (str(db.getSegmentDbId()), segmentState))
else:
raise Exception("Segment is in unexpected state. dbid = %s, segmentState = %s." % (str(db.getSegmentDbId()), segmentState))
# check that we actually have mirrors
if gpArray.getFaultStrategy() == gparray.FAULT_STRATEGY_SAN:
self.SanFailback(gpArray, gpEnv)
return 0
elif gpArray.getFaultStrategy() == gparray.FAULT_STRATEGY_NONE:
self.GPSQLFailback(gpArray, gpEnv)
return 0
elif gpArray.getFaultStrategy() != gparray.FAULT_STRATEGY_FILE_REPLICATION:
raise ExceptionNoStackTraceNeeded( \
'GPDB Mirroring replication is not configured for this Greenplum Database instance.')
# We have phys-rep/filerep mirrors.
if self.__options.outputSpareDataDirectoryFile is not None:
self.__outputSpareDataDirectoryFile(gpEnv, gpArray, self.__options.outputSpareDataDirectoryFile)
return 0
if self.__options.newRecoverHosts is not None:
try:
uniqueHosts = []
[uniqueHosts.append(h.strip()) for h in self.__options.newRecoverHosts.split(',') \
if h.strip() not in uniqueHosts ]
self.__options.newRecoverHosts = uniqueHosts
except Exception, ex:
raise ProgramArgumentValidationException(\
"Invalid value for recover hosts: %s" % ex)
# If it's a rebalance operation, make sure we are in an acceptable state to do that
# Acceptable state is:
# - No segments down
# - No segments in change tracking or unsynchronized state
if self.__options.rebalanceSegments:
if len(gpArray.get_invalid_segdbs()) > 0:
raise Exception("Down segments still exist. All segments must be up to rebalance.")
if len(gpArray.get_synchronized_segdbs()) != len(gpArray.getSegDbList()):
raise Exception("Some segments are not yet synchronized. All segments must be synchronized to rebalance.")
# retain list of hosts that were existing in the system prior to getRecoverActions...
# this will be needed for later calculations that determine whether
# new hosts were added into the system
existing_hosts = set(gpArray.getHostList())
# figure out what needs to be done
mirrorBuilder = self.getRecoveryActionsBasedOnOptions(gpEnv, gpArray)
if self.__options.outputSampleConfigFile is not None:
# just output config file and done
self.outputToFile(mirrorBuilder, gpArray, self.__options.outputSampleConfigFile)
self.logger.info('Configuration file output to %s successfully.' % self.__options.outputSampleConfigFile)
elif self.__options.rebalanceSegments:
assert(isinstance(mirrorBuilder,GpSegmentRebalanceOperation))
# Make sure we have work to do
if len(gpArray.get_unbalanced_segdbs()) == 0:
self.logger.info("No segments are running in their non-preferred role and need to be rebalanced.")
else:
self.displayRecovery(mirrorBuilder, gpArray)
if self.__options.interactive:
self.logger.warn("This operation will cancel queries that are currently executing.")
self.logger.warn("Connections to the database however will not be interrupted.")
if not userinput.ask_yesno(None, "\nContinue with segment rebalance procedure", 'N'):
raise UserAbortedException()
mirrorBuilder.rebalance()
self.logger.info("******************************************************************")
self.logger.info("The rebalance operation has completed successfully.")
self.logger.info("There is a resynchronization running in the background to bring all")
self.logger.info("segments in sync.")
self.logger.info("")
self.logger.info("Use gpstate -s to check the resynchronization progress.")
self.logger.info("******************************************************************")
elif len(mirrorBuilder.getMirrorsToBuild()) == 0:
self.logger.info('No segments to recover')
else:
mirrorBuilder.checkForPortAndDirectoryConflicts(gpArray)
self.displayRecovery(mirrorBuilder, gpArray)
self.__displayRecoveryWarnings(mirrorBuilder)
if self.__options.interactive:
if not userinput.ask_yesno(None, "\nContinue with segment recovery procedure", 'N'):
raise UserAbortedException()
# sync packages
current_hosts = set(gpArray.getHostList())
new_hosts = current_hosts - existing_hosts
if new_hosts:
self.syncPackages(new_hosts)
mirrorBuilder.buildMirrors("recover", gpEnv, gpArray )
confProvider.sendPgElogFromMaster("Recovery of %d segment(s) has been started." % \
len(mirrorBuilder.getMirrorsToBuild()), True)
self.logger.info("******************************************************************")
self.logger.info("Updating segments for resynchronization is completed.")
self.logger.info("For segments updated successfully, resynchronization will continue in the background.")
self.logger.info("")
self.logger.info("Use gpstate -s to check the resynchronization progress.")
self.logger.info("******************************************************************")
return 0 # success -- exit code 0!
def cleanup(self):
if self.__pool:
self.__pool.haltWork() # \ MPP-13489, CR-2572
self.__pool.joinWorkers() # > all three of these appear necessary
self.__pool.join() # / see MPP-12633, CR-2252 as well
#-------------------------------------------------------------------------
@staticmethod
def createParser():
description = ("Recover a failed segment")
help = [""]
parser = OptParser(option_class=OptChecker,
description=' '.join(description.split()),
version='%prog version $Revision$')
parser.setHelp(help)
addStandardLoggingAndHelpOptions(parser, True)
addTo = OptionGroup(parser, "Connection Options")
parser.add_option_group(addTo)
addMasterDirectoryOptionForSingleClusterProgram(addTo)
addTo = OptionGroup(parser, "Recovery Source Options")
parser.add_option_group(addTo)
addTo.add_option("-i", None, type="string",
dest="recoveryConfigFile",
metavar="<configFile>",
help="Recovery configuration file")
addTo.add_option("-o", None,
dest="outputSampleConfigFile",
metavar="<configFile>", type="string",
help="Sample configuration file name to output; "
"this file can be passed to a subsequent call using -i option")
addTo = OptionGroup(parser, "Recovery Destination Options")
parser.add_option_group(addTo)
addTo.add_option("-p", None, type="string",
dest="newRecoverHosts",
metavar="<targetHosts>",
help="Spare new hosts to which to recover segments")
addTo.add_option("-s", None, type="string",
dest="spareDataDirectoryFile",
metavar="<spareDataDirectoryFile>",
help="File listing spare data directories (in filespaceName=path format) on current hosts")
addTo.add_option("-S", None, type="string",
dest="outputSpareDataDirectoryFile",
metavar="<outputSpareDataDirectoryFile>",
help="Write a sample file to be modified for use by -s <spareDirectoryFile> option")
addTo = OptionGroup(parser, "Recovery Options")
parser.add_option_group(addTo)
addTo.add_option('-F', None, default=False, action='store_true',
dest="forceFullResynchronization",
metavar="<forceFullResynchronization>",
help="Force full segment resynchronization")
addTo.add_option("-B", None, type="int", default=16,
dest="parallelDegree",
metavar="<parallelDegree>",
help="Max # of workers to use for building recovery segments. [default: %default]")
addTo.add_option("-r", None, default=False, action='store_true',
dest='rebalanceSegments', help='Rebalance synchronized segments.')
parser.set_defaults()
return parser
@staticmethod
def createProgram(options, args):
if len(args) > 0 :
raise ProgramArgumentValidationException(\
"too many arguments: only options may be specified", True)
return GpRecoverSegmentProgram(options)
@staticmethod
def mainOptions():
"""
The dictionary this method returns instructs the simple_main framework
to check for a gprecoverseg.pid file under MASTER_DATA_DIRECTORY to
prevent the customer from trying to run more than one instance of
gprecoverseg at the same time.
"""
return {'pidfilename':'gprecoverseg.pid', 'parentpidvar':'GPRECOVERPID'}
| zuowang/incubator-hawq | tools/bin/gppylib/programs/clsRecoverSegment.py | Python | apache-2.0 | 55,995 |
'''
Copyright 2017, Dell, Inc.
Author(s):
UCS test script that tests:
-All the ucs service APIs
-The Discovery workflow
-The Catalog workflow
'''
import fit_path # NOQA: unused import
import unittest
from common import fit_common
from nosedep import depends
import flogging
from nose.plugins.attrib import attr
from config.settings import get_ucs_cred
logs = flogging.get_loggers()
UCSM_IP = fit_common.fitcfg().get('ucsm_ip')
UCS_SERVICE_URI = fit_common.fitcfg().get('ucs_service_uri')
UCSM_USER, UCSM_PASS = get_ucs_cred()
@attr(all=True, regression=True, smoke=False, ucs=True)
class ucs_api(unittest.TestCase):
def ucs_url_factory(self, api, identifier=None):
"""
returns a fully qualified UCS API
:param api:UCS API
:param identifier: identify the ucs element in the catalog API
:return:
"""
if identifier is None:
url = UCS_SERVICE_URI + "/" + api
else:
url = UCS_SERVICE_URI + "/" + api + "?identifier=" + identifier
headers = {"ucs-user": UCSM_USER,
"ucs-password": UCSM_PASS,
"ucs-host": UCSM_IP}
return (url, headers)
@unittest.skipUnless("ucsm_ip" in fit_common.fitcfg(), "")
def test_check_ucs_params(self):
self.assertNotEqual(UCSM_IP, None, "Expected value for UCSM_IP other then None and found {0}"
.format(UCSM_IP))
self.assertNotEqual(UCS_SERVICE_URI, None,
"Expected value for UCS_SERVICE_URI other then None and found {0}"
.format(UCS_SERVICE_URI))
@depends(after=test_check_ucs_params)
def test_ucs_log_in(self):
"""
Test the /logIn ucs API
:return:
"""
url, headers = self.ucs_url_factory("login")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertNotEqual(api_data["json"], None, "Expected a token to be returned on login and received None")
self.assertNotEqual(type(api_data["json"]), "unicode", "Unexpected Token was received on Login")
@depends(after=test_check_ucs_params)
def test_ucs_get_sys(self):
"""
Test the /sys ucs API
:return:
"""
url, headers = self.ucs_url_factory("sys")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertIn("Fabric Interconnects", api_data["json"], "Results did not contain 'Fabric Interconnects'")
self.assertIn("Servers", api_data["json"], "Results did not contain 'Servers")
self.assertIn("FEX", api_data["json"], "Results did not contain 'FEX")
self.assertIn("Chassis", api_data["json"], "Results did not contain 'Chassis")
@depends(after=test_check_ucs_params)
def test_ucs_get_rackmount(self):
"""
Test the /rackmount ucs API
:return:
"""
url, headers = self.ucs_url_factory("rackmount")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertGreater(len(api_data["json"]), 0, "Found zero Rackmounts")
# TO DO more in depth testing for the returned content such as mac validation, etc...
@depends(after=test_check_ucs_params)
def test_ucs_get_chassis(self):
"""
Test the /chassis ucs API
:return:
"""
url, headers = self.ucs_url_factory("chassis")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertGreater(len(api_data["json"]), 0, "Zero chassis elements found")
# TO DO more in depth testing for the returned content such as mac validation, etc...
@depends(after=test_ucs_get_chassis)
def test_ucs_get_serviceProfile(self):
"""
Test the /serviceProfile ucs API
:return:
"""
url, headers = self.ucs_url_factory("serviceProfile")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
if len(api_data["json"]["ServiceProfile"]["members"]) == 0:
raise unittest.SkipTest("No Service Profiles Defined")
# TO DO more in depth testing for the returned content such as mac validation, etc...
@depends(after=test_check_ucs_params)
def test_api_20_ucs_get_catalog(self):
"""
Test the /sys ucs API
:return:
"""
url, headers = self.ucs_url_factory("sys")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
total_elements = 0
for elementTypes in api_data["json"]:
for element in api_data["json"][str(elementTypes)]:
url, headers = self.ucs_url_factory("catalog", identifier=element["relative_path"].split("/")[-1])
api_data_c = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data_c['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data_c['status']))
total_elements += 1
self.assertGreater(total_elements, 0, "Zero catalog elements found")
# TO DO: deeper check on the catalog data
def check_all_server_power_state(self, state):
"""
Test to see if all Associated servers are in the specified state
:return: True or False
"""
url, headers = self.ucs_url_factory("serviceProfile")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
total_elements = 0
for server in api_data["json"]["ServiceProfile"]["members"]:
if server["assoc_state"] == "associated":
url, headers = self.ucs_url_factory("power", identifier=str(server["path"]))
api_data_c = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data_c['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data_c['status']))
self.assertEqual(api_data_c["json"]["serverState"], state,
'Server ' + str(server["path"]) + ' reported power state ' +
str(api_data_c["json"]["serverState"]) + ' expected: ' + state)
total_elements += 1
self.assertGreater(total_elements, 0, "Found zero elements")
def set_all_server_power_state(self, state):
"""
Use the POST /power ucs API to set the state of all servers
:return:
"""
url, headers = self.ucs_url_factory("serviceProfile")
api_data = fit_common.restful(url, rest_headers=headers)
self.assertEqual(api_data['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
total_elements = 0
for server in api_data["json"]["ServiceProfile"]["members"]:
if server["assoc_state"] == "associated":
url, headers = self.ucs_url_factory("power", identifier=str(server["path"]))
api_data_c = fit_common.restful(url + "&action=" + state, rest_headers=headers, rest_action='post')
self.assertEqual(api_data_c['status'], 200,
'Incorrect HTTP return code, expected 200, got:' + str(api_data_c['status']))
total_elements += 1
self.assertGreater(total_elements, 0, "Found zero elements")
@depends(after=test_ucs_get_serviceProfile)
def test_api_20_ucs_power(self):
"""
Test the GET and POST api for server power state
:return:
"""
# first power off all servers
self.set_all_server_power_state("off")
# verify power state is down
self.check_all_server_power_state("down")
# now power on the servers
self.set_all_server_power_state("on")
# verify power state is up
self.check_all_server_power_state("up")
if __name__ == '__main__':
unittest.main()
| johren/RackHD | test/tests/ucs/test_ucs_api.py | Python | apache-2.0 | 8,978 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and Summary Operations."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_logging_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_logging_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.deprecation import deprecated
# The python wrapper for Assert is in control_flow_ops, as the Assert
# call relies on certain conditionals for its dependencies. Use
# control_flow_ops.Assert.
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Print(input_, data, message=None, first_n=None, summarize=None,
name=None):
"""Prints a list of tensors.
This is an identity op (behaves like `tf.identity`) with the side effect
of printing `data` when evaluating.
Note: This op prints to the standard error. It is not currently compatible
with jupyter notebook (printing to the notebook *server's* output, not into
the notebook).
Args:
input_: A tensor passed through this op.
data: A list of tensors to print out when op is evaluated.
message: A string, prefix of the error message.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and contents as `input_`.
"""
return gen_logging_ops._print(input_, data, message, first_n, summarize, name)
@ops.RegisterGradient("Print")
def _PrintGrad(op, *grad):
return list(grad) + [None] * (len(op.inputs) - 1)
def _Collect(val, collections, default_collections):
if collections is None:
collections = default_collections
for key in collections:
ops.add_to_collection(key, val)
@deprecated(
"2016-11-30", "Please switch to tf.summary.histogram. Note that "
"tf.summary.histogram uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def histogram_summary(tag, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
This ops is deprecated. Please switch to tf.summary.histogram.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
val = gen_logging_ops._histogram_summary(
tag=tag, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.image. Note that "
"tf.summary.image uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, the max_images "
"argument was renamed to max_outputs.")
def image_summary(tag, tensor, max_images=3, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with images.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_images` is 1, the summary value tag is '*tag*/image'.
* If `max_images` is greater than 1, the summary value tags are
generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
val = gen_logging_ops._image_summary(
tag=tag, tensor=tensor, max_images=max_images, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.audio. Note that "
"tf.summary.audio uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def audio_summary(tag,
tensor,
sample_rate,
max_outputs=3,
collections=None,
name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
This op is deprecated. Please switch to tf.summary.audio.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
sample_rate = ops.convert_to_tensor(sample_rate, dtype=dtypes.float32,
name="sample_rate")
val = gen_logging_ops._audio_summary_v2(tag=tag,
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge.")
def merge_summary(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op is deprecated. Please switch to tf.summary.merge, which has identical
behavior.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
with ops.name_scope(name, "MergeSummary", inputs):
val = gen_logging_ops._merge_summary(inputs=inputs, name=name)
_Collect(val, collections, [])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.")
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
This op is deprecated. Please switch to tf.summary.merge_all, which has
identical behavior.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = ops.get_collection(key)
if not summary_ops:
return None
else:
return merge_summary(summary_ops)
def get_summary_op():
"""Returns a single Summary op that would run all summaries.
Either existing one from `SUMMARY_OP` collection or merges all existing
summaries.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is not None:
if summary_op:
summary_op = summary_op[0]
else:
summary_op = None
if summary_op is None:
summary_op = merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
return summary_op
@deprecated(
"2016-11-30", "Please switch to tf.summary.scalar. Note that "
"tf.summary.scalar uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, passing a "
"tensor or list of tags to a scalar summary op is no longer "
"supported.")
def scalar_summary(tags, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with scalar values.
This ops is deprecated. Please switch to tf.summary.scalar.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The input `tags` and `values` must have the same shape. The generated
summary has a summary value for each tag-value pair in `tags` and `values`.
Args:
tags: A `string` `Tensor`. Tags for the summaries.
values: A real numeric Tensor. Values for the summaries.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
val = gen_logging_ops._scalar_summary(tags=tags, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
| eadgarchen/tensorflow | tensorflow/python/ops/logging_ops.py | Python | apache-2.0 | 14,418 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, softwar
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.interactions import base
class EndExploration(base.BaseInteraction):
"""Interaction that allows the exploration to end.
This interaction is unusual in that there is no way for the learner to
submit an answer, so the exploration effectively terminates at the state
containing it.
"""
name = 'End Conversation'
description = (
'Suggests recommendations for explorations to try next.')
display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
is_terminal = True
_dependency_ids = []
answer_type = 'Null'
_customization_arg_specs = [{
'name': 'recommendedExplorationIds',
'description': (
'IDs of explorations to recommend to the learner (maximum of 8). '
'(The ID of an exploration is the string of characters appearing '
'after \'/explore/\' in the URL bar.)'),
'schema': {
'type': 'list',
'items': {
'type': 'unicode',
},
'ui_config': {
'add_element_text': 'Add exploration ID',
}
},
'default_value': [],
}]
| kaffeel/oppia | extensions/interactions/EndExploration/EndExploration.py | Python | apache-2.0 | 1,767 |
from planout.experiment import SimpleExperiment
import psycopg2 as pg
from psycopg2.extras import Json as pJson
class PostgresLoggedExperiment(SimpleExperiment):
def configure_logger(self):
""" Sets up a logger to postgres.
1. Modify the connection_parameters variable to be a dictionary of the
parameters to create a connection to your postgres database.
2. Modify the table variable to be the table to which you plan on
logging.
"""
connection_parameters = {'host': 'localhost',
'database': 'experiments'}
table = 'experiments'
self.conn = pg.connect(**connection_parameters)
self.table = table
def log(self, data):
""" Log exposure. """
columns = ['inputs', 'name', 'checksum', 'params', 'time', 'salt',
'event']
names = ','.join(columns)
placeholders = ','.join(['%s']*len(columns))
ins_statement = ('insert into {} ({}) values ({})'
.format(self.table, names, placeholders))
row = []
for column in columns:
value = data[column]
row.append(pJson(value) if isinstance(value, dict) else value)
with self.conn.cursor() as curr:
curr.execute(ins_statement, row)
self.conn.commit()
| rawls238/planout | contrib/postgres_logger.py | Python | bsd-3-clause | 1,366 |
from app.config.cplog import CPLog
import cherrypy
import urllib
import urllib2
import telnetlib
import re
log = CPLog(__name__)
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class NMJ:
host = ''
database = ''
mount = ''
def __init__(self):
self.enabled = self.conf('enabled');
self.host = self.conf('host')
self.database = self.conf('database')
self.mount = self.conf('mount')
pass
def conf(self, options):
return cherrypy.config['config'].get('NMJ', options)
def auto(self, host):
terminal = False
try:
terminal = telnetlib.Telnet(host)
except Exception:
log.error(u"Warning: unable to get a telnet session to %s" % (host))
return False
log.debug(u"Connected to %s via telnet" % (host))
terminal.read_until("sh-3.00# ")
terminal.write("cat /tmp/source\n")
terminal.write("cat /tmp/netshare\n")
terminal.write("exit\n")
tnoutput = terminal.read_all()
match = re.search(r"(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)", tnoutput)
if match:
database = match.group(1)
device = match.group(2)
log.info(u"Found NMJ database %s on device %s" % (database, device))
self.database = database
else:
log.error(u"Could not get current NMJ database on %s, NMJ is probably not running!" % (host))
return False
if device.startswith("NETWORK_SHARE/"):
match = re.search(".*(?=\r\n?%s)" % (re.escape(device[14:])), tnoutput)
if match:
mount = match.group().replace("127.0.0.1", host)
log.info(u"Found mounting url on the Popcorn Hour in configuration: %s" % (mount))
self.mount = mount
else:
log.error("Detected a network share on the Popcorn Hour, but could not get the mounting url")
return False
return '{"database": "%(database)s", "mount": "%(mount)s"}' % {"database": database, "mount": mount}
def notify(self, message):
#For uniformity reasons not removed
return
def updateLibrary(self):
if not self.enabled:
return False
if self.mount:
try:
req = urllib2.Request(self.mount)
log.debug(u"Try to mount network drive via url: %s" % (self.mount))
handle = urllib2.urlopen(req)
except IOError, e:
log.error(u"Warning: Couldn't contact popcorn hour on host %s: %s" % (self.host, e))
return False
params = {
"arg0": "scanner_start",
"arg1": self.database,
"arg2": "background",
"arg3": ""}
params = urllib.urlencode(params)
UPDATE_URL = "http://%(host)s:8008/metadata_database?%(params)s"
updateUrl = UPDATE_URL % {"host": self.host, "params": params}
try:
req = urllib2.Request(updateUrl)
log.debug(u"Sending NMJ scan update command via url: %s" % (updateUrl))
handle = urllib2.urlopen(req)
response = handle.read()
except IOError, e:
log.error(u"Warning: Couldn't contact Popcorn Hour on host %s: %s" % (host, e))
return False
try:
et = etree.fromstring(response)
result = et.findtext("returnValue")
except SyntaxError, e:
log.error(u"Unable to parse XML returned from the Popcorn Hour: %s" % (e))
return False
if int(result) > 0:
log.error(u"Popcorn Hour returned an errorcode: %s" % (result))
return False
else:
log.info("NMJ started background scan")
return True
def test(self, host, database, mount):
self.enabled = True
self.host = host
self.database = database
self.mount = mount
self.updateLibrary()
| brototyp/CouchPotato | app/lib/nmj.py | Python | gpl-3.0 | 4,088 |
import datetime
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
from django_jinja import library
from django.utils.http import urlencode
@library.global_function
def thisyear():
"""The current year."""
return datetime.date.today().year
@library.filter
def urlparams(url_, hash=None, **query):
"""Add a fragment and/or query paramaters to a URL.
New query params will be appended to exising parameters, except duplicate
names, which will be replaced.
"""
url = urlparse.urlparse(url_)
fragment = hash if hash is not None else url.fragment
# Use dict(parse_qsl) so we don't get lists of values.
query_dict = dict(urlparse.parse_qsl(url.query))
query_dict.update(query)
query_string = urlencode(
[(k, v) for k, v in query_dict.items() if v is not None])
new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
query_string, fragment)
return new.geturl()
| mozilla/sugardough | {{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/base/templatetags/helpers.py | Python | apache-2.0 | 1,004 |
"""
A model for coal mining disasters data with a changepoint
switchpoint ~ U(0, 110)
early_mean ~ Exp(1.)
late_mean ~ Exp(1.)
disasters[t] ~ Po(early_mean if t <= switchpoint, late_mean otherwise)
"""
from pymc3 import *
import theano.tensor as t
from numpy import arange, array, ones, concatenate
from numpy.random import randint
from numpy.ma import masked_values
__all__ = ['disasters_data', 'switchpoint', 'early_mean', 'late_mean', 'rate',
'disasters']
# Time series of recorded coal mining disasters in the UK from 1851 to 1962
disasters_data = array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, -1, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, -1, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
years = len(disasters_data)
masked_values = masked_values(disasters_data, value=-1)
with Model() as model:
# Prior for distribution of switchpoint location
switchpoint = DiscreteUniform('switchpoint', lower=0, upper=years)
# Priors for pre- and post-switch mean number of disasters
early_mean = Exponential('early_mean', lam=1.)
late_mean = Exponential('late_mean', lam=1.)
# Allocate appropriate Poisson rates to years before and after current
# switchpoint location
idx = arange(years)
rate = switch(switchpoint >= idx, early_mean, late_mean)
# Data likelihood
disasters = Poisson('disasters', rate, observed=masked_values)
def run(n=1000):
if n == "short":
n = 500
with model:
# Initial values for stochastic nodes
start = {'early_mean': 2., 'late_mean': 3.}
# Use slice sampler for means
step1 = Slice([early_mean, late_mean])
# Use Metropolis for switchpoint, since it accomodates discrete variables
step2 = Metropolis([switchpoint, disasters.missing_values ])
tr = sample(n, tune=500, start=start, step=[step1, step2])
summary(tr, vars=['disasters_missing'])
if __name__ == '__main__':
run()
| tyarkoni/pymc3 | pymc3/examples/disaster_model_missing.py | Python | apache-2.0 | 2,328 |
"""
Contingency table functions (:mod:`scipy.stats.contingency`)
============================================================
Functions for creating and analyzing contingency tables.
.. currentmodule:: scipy.stats.contingency
.. autosummary::
:toctree: generated/
chi2_contingency
relative_risk
crosstab
association
expected_freq
margins
"""
from functools import reduce
import math
import numpy as np
from ._stats_py import power_divergence
from ._relative_risk import relative_risk
from ._crosstab import crosstab
__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
'association', 'relative_risk']
def margins(a):
"""Return a list of the marginal sums of the array `a`.
Parameters
----------
a : ndarray
The array for which to compute the marginal sums.
Returns
-------
margsums : list of ndarrays
A list of length `a.ndim`. `margsums[k]` is the result
of summing `a` over all axes except `k`; it has the same
number of dimensions as `a`, but the length of each axis
except axis `k` will be 1.
Examples
--------
>>> a = np.arange(12).reshape(2, 6)
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11]])
>>> from scipy.stats.contingency import margins
>>> m0, m1 = margins(a)
>>> m0
array([[15],
[51]])
>>> m1
array([[ 6, 8, 10, 12, 14, 16]])
>>> b = np.arange(24).reshape(2,3,4)
>>> m0, m1, m2 = margins(b)
>>> m0
array([[[ 66]],
[[210]]])
>>> m1
array([[[ 60],
[ 92],
[124]]])
>>> m2
array([[[60, 66, 72, 78]]])
"""
margsums = []
ranged = list(range(a.ndim))
for k in ranged:
marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k])
margsums.append(marg)
return margsums
def expected_freq(observed):
"""
Compute the expected frequencies from a contingency table.
Given an n-dimensional contingency table of observed frequencies,
compute the expected frequencies for the table based on the marginal
sums under the assumption that the groups associated with each
dimension are independent.
Parameters
----------
observed : array_like
The table of observed frequencies. (While this function can handle
a 1-D array, that case is trivial. Generally `observed` is at
least 2-D.)
Returns
-------
expected : ndarray of float64
The expected frequencies, based on the marginal sums of the table.
Same shape as `observed`.
Examples
--------
>>> from scipy.stats.contingency import expected_freq
>>> observed = np.array([[10, 10, 20],[20, 20, 20]])
>>> expected_freq(observed)
array([[ 12., 12., 16.],
[ 18., 18., 24.]])
"""
# Typically `observed` is an integer array. If `observed` has a large
# number of dimensions or holds large values, some of the following
# computations may overflow, so we first switch to floating point.
observed = np.asarray(observed, dtype=np.float64)
# Create a list of the marginal sums.
margsums = margins(observed)
# Create the array of expected frequencies. The shapes of the
# marginal sums returned by apply_over_axes() are just what we
# need for broadcasting in the following product.
d = observed.ndim
expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
return expected
def chi2_contingency(observed, correction=True, lambda_=None):
"""Chi-square test of independence of variables in a contingency table.
This function computes the chi-square statistic and p-value for the
hypothesis test of independence of the observed frequencies in the
contingency table [1]_ `observed`. The expected frequencies are computed
based on the marginal sums under the assumption of independence; see
`scipy.stats.contingency.expected_freq`. The number of degrees of
freedom is (expressed using numpy functions and attributes)::
dof = observed.size - sum(observed.shape) + observed.ndim - 1
Parameters
----------
observed : array_like
The contingency table. The table contains the observed frequencies
(i.e. number of occurrences) in each category. In the two-dimensional
case, the table is often described as an "R x C table".
correction : bool, optional
If True, *and* the degrees of freedom is 1, apply Yates' correction
for continuity. The effect of the correction is to adjust each
observed value by 0.5 towards the corresponding expected value.
lambda_ : float or str, optional
By default, the statistic computed in this test is Pearson's
chi-squared statistic [2]_. `lambda_` allows a statistic from the
Cressie-Read power divergence family [3]_ to be used instead. See
`scipy.stats.power_divergence` for details.
Returns
-------
chi2 : float
The test statistic.
p : float
The p-value of the test
dof : int
Degrees of freedom
expected : ndarray, same shape as `observed`
The expected frequencies, based on the marginal sums of the table.
See Also
--------
scipy.stats.contingency.expected_freq
scipy.stats.fisher_exact
scipy.stats.chisquare
scipy.stats.power_divergence
scipy.stats.barnard_exact
scipy.stats.boschloo_exact
Notes
-----
An often quoted guideline for the validity of this calculation is that
the test should be used only if the observed and expected frequencies
in each cell are at least 5.
This is a test for the independence of different categories of a
population. The test is only meaningful when the dimension of
`observed` is two or more. Applying the test to a one-dimensional
table will always result in `expected` equal to `observed` and a
chi-square statistic equal to 0.
This function does not handle masked arrays, because the calculation
does not make sense with missing values.
Like stats.chisquare, this function computes a chi-square statistic;
the convenience this function provides is to figure out the expected
frequencies and degrees of freedom from the given contingency table.
If these were already known, and if the Yates' correction was not
required, one could use stats.chisquare. That is, if one calls::
chi2, p, dof, ex = chi2_contingency(obs, correction=False)
then the following is true::
(chi2, p) == stats.chisquare(obs.ravel(), f_exp=ex.ravel(),
ddof=obs.size - 1 - dof)
The `lambda_` argument was added in version 0.13.0 of scipy.
References
----------
.. [1] "Contingency table",
https://en.wikipedia.org/wiki/Contingency_table
.. [2] "Pearson's chi-squared test",
https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
.. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
A two-way example (2 x 3):
>>> from scipy.stats import chi2_contingency
>>> obs = np.array([[10, 10, 20], [20, 20, 20]])
>>> chi2_contingency(obs)
(2.7777777777777777,
0.24935220877729619,
2,
array([[ 12., 12., 16.],
[ 18., 18., 24.]]))
Perform the test using the log-likelihood ratio (i.e. the "G-test")
instead of Pearson's chi-squared statistic.
>>> g, p, dof, expctd = chi2_contingency(obs, lambda_="log-likelihood")
>>> g, p
(2.7688587616781319, 0.25046668010954165)
A four-way example (2 x 2 x 2 x 2):
>>> obs = np.array(
... [[[[12, 17],
... [11, 16]],
... [[11, 12],
... [15, 16]]],
... [[[23, 15],
... [30, 22]],
... [[14, 17],
... [15, 16]]]])
>>> chi2_contingency(obs)
(8.7584514426741897,
0.64417725029295503,
11,
array([[[[ 14.15462386, 14.15462386],
[ 16.49423111, 16.49423111]],
[[ 11.2461395 , 11.2461395 ],
[ 13.10500554, 13.10500554]]],
[[[ 19.5591166 , 19.5591166 ],
[ 22.79202844, 22.79202844]],
[[ 15.54012004, 15.54012004],
[ 18.10873492, 18.10873492]]]]))
"""
observed = np.asarray(observed)
if np.any(observed < 0):
raise ValueError("All values in `observed` must be nonnegative.")
if observed.size == 0:
raise ValueError("No data; `observed` has size 0.")
expected = expected_freq(observed)
if np.any(expected == 0):
# Include one of the positions where expected is zero in
# the exception message.
zeropos = list(zip(*np.nonzero(expected == 0)))[0]
raise ValueError("The internally computed table of expected "
"frequencies has a zero element at %s." % (zeropos,))
# The degrees of freedom
dof = expected.size - sum(expected.shape) + expected.ndim - 1
if dof == 0:
# Degenerate case; this occurs when `observed` is 1D (or, more
# generally, when it has only one nontrivial dimension). In this
# case, we also have observed == expected, so chi2 is 0.
chi2 = 0.0
p = 1.0
else:
if dof == 1 and correction:
# Adjust `observed` according to Yates' correction for continuity.
# Magnitude of correction no bigger than difference; see gh-13875
diff = expected - observed
direction = np.sign(diff)
magnitude = np.minimum(0.5, np.abs(diff))
observed = observed + magnitude * direction
chi2, p = power_divergence(observed, expected,
ddof=observed.size - 1 - dof, axis=None,
lambda_=lambda_)
return chi2, p, dof, expected
def association(observed, method="cramer", correction=False, lambda_=None):
"""Calculates degree of association between two nominal variables.
The function provides the option for computing one of three measures of
association between two nominal variables from the data given in a 2d
contingency table: Tschuprow's T, Pearson's Contingency Coefficient
and Cramer's V.
Parameters
----------
observed : array-like
The array of observed values
method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
The association test statistic.
correction : bool, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
lambda_ : float or str, optional
Inherited from `scipy.stats.contingency.chi2_contingency()`
Returns
-------
statistic : float
Value of the test statistic
Notes
-----
Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient, all
measure the degree to which two nominal or ordinal variables are related,
or the level of their association. This differs from correlation, although
many often mistakenly consider them equivalent. Correlation measures in
what way two variables are related, whereas, association measures how
related the variables are. As such, association does not subsume
independent variables, and is rather a test of independence. A value of
1.0 indicates perfect association, and 0.0 means the variables have no
association.
Both the Cramer's V and Tschuprow's T are extensions of the phi
coefficient. Moreover, due to the close relationship between the
Cramer's V and Tschuprow's T the returned values can often be similar
or even equivalent. They are likely to diverge more as the array shape
diverges from a 2x2.
References
----------
.. [1] "Tschuprow's T",
https://en.wikipedia.org/wiki/Tschuprow's_T
.. [2] Tschuprow, A. A. (1939)
Principles of the Mathematical Theory of Correlation;
translated by M. Kantorowitsch. W. Hodge & Co.
.. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
.. [4] "Nominal Association: Phi and Cramer's V",
http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
.. [5] Gingrich, Paul, "Association Between Variables",
http://uregina.ca/~gingrich/ch11a.pdf
Examples
--------
An example with a 4x2 contingency table:
>>> from scipy.stats.contingency import association
>>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
Pearson's contingency coefficient
>>> association(obs4x2, method="pearson")
0.18303298140595667
Cramer's V
>>> association(obs4x2, method="cramer")
0.18617813077483678
Tschuprow's T
>>> association(obs4x2, method="tschuprow")
0.14146478765062995
"""
arr = np.asarray(observed)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError("`observed` must be an integer array.")
if len(arr.shape) != 2:
raise ValueError("method only accepts 2d arrays")
chi2_stat = chi2_contingency(arr, correction=correction,
lambda_=lambda_)
phi2 = chi2_stat[0] / arr.sum()
n_rows, n_cols = arr.shape
if method == "cramer":
value = phi2 / min(n_cols - 1, n_rows - 1)
elif method == "tschuprow":
value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
elif method == 'pearson':
value = phi2 / (1 + phi2)
else:
raise ValueError("Invalid argument value: 'method' argument must "
"be 'cramer', 'tschuprow', or 'pearson'")
return math.sqrt(value)
| anntzer/scipy | scipy/stats/contingency.py | Python | bsd-3-clause | 13,835 |
#!/usr/bin/env python2
# Copyright (c) 2013-2014 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Andreas Hansson
# This script is used to dump protobuf instruction traces to ASCII
# format. It assumes that protoc has been executed and already
# generated the Python package for the inst messages. This can
# be done manually using:
# protoc --python_out=. inst.proto
# The ASCII trace format uses one line per request.
import protolib
import sys
# Import the packet proto definitions
try:
import inst_pb2
except:
print "Did not find protobuf inst definitions, attempting to generate"
from subprocess import call
error = call(['protoc', '--python_out=util', '--proto_path=src/proto',
'src/proto/inst.proto'])
if not error:
print "Generated inst proto definitions"
try:
import google.protobuf
except:
print "Please install Python protobuf module"
exit(-1)
import inst_pb2
else:
print "Failed to import inst proto definitions"
exit(-1)
def main():
if len(sys.argv) != 3:
print "Usage: ", sys.argv[0], " <protobuf input> <ASCII output>"
exit(-1)
# Open the file in read mode
proto_in = protolib.openFileRd(sys.argv[1])
try:
ascii_out = open(sys.argv[2], 'w')
except IOError:
print "Failed to open ", sys.argv[2], " for writing"
exit(-1)
# Read the magic number in 4-byte Little Endian
magic_number = proto_in.read(4)
if magic_number != "gem5":
print "Unrecognized file", sys.argv[1]
exit(-1)
print "Parsing instruction header"
# Add the packet header
header = inst_pb2.InstHeader()
protolib.decodeMessage(proto_in, header)
print "Object id:", header.obj_id
print "Tick frequency:", header.tick_freq
print "Memory addresses included:", header.has_mem
if header.ver != 0:
print "Warning: file version newer than decoder:", header.ver
print "This decoder may not understand how to decode this file"
print "Parsing instructions"
num_insts = 0
inst = inst_pb2.Inst()
# Decode the inst messages until we hit the end of the file
optional_fields = ('tick', 'type', 'inst_flags', 'addr', 'size', 'mem_flags')
while protolib.decodeMessage(proto_in, inst):
# If we have a tick use it, otherwise count instructions
if inst.HasField('tick'):
tick = inst.tick
else:
tick = num_insts
if inst.HasField('nodeid'):
node_id = inst.nodeid
else:
node_id = 0;
if inst.HasField('cpuid'):
cpu_id = inst.cpuid
else:
cpu_id = 0;
ascii_out.write('%-20d: (%03d/%03d) %#010x @ %#016x ' % (tick, node_id, cpu_id,
inst.inst, inst.pc))
if inst.HasField('type'):
ascii_out.write(' : %10s' % inst_pb2._INST_INSTTYPE.values_by_number[inst.type].name)
for mem_acc in inst.mem_access:
ascii_out.write(" %#x-%#x;" % (mem_acc.addr, mem_acc.addr + mem_acc.size))
ascii_out.write('\n')
num_insts += 1
print "Parsed instructions:", num_insts
# We're done
ascii_out.close()
proto_in.close()
if __name__ == "__main__":
main()
| HwisooSo/gemV-update | util/decode_inst_trace.py | Python | bsd-3-clause | 5,387 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.api.openstack.compute.plugins.v3 import migrations
from nova import context
from nova import exception
from nova.objects import base
from nova.objects import migration
from nova.openstack.common.fixture import moxstubout
from nova import test
fake_migrations = [
{
'id': 1234,
'source_node': 'node1',
'dest_node': 'node2',
'source_compute': 'compute1',
'dest_compute': 'compute2',
'dest_host': '1.2.3.4',
'status': 'Done',
'instance_uuid': 'instance_id_123',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'deleted_at': None,
'deleted': False
},
{
'id': 5678,
'source_node': 'node10',
'dest_node': 'node20',
'source_compute': 'compute10',
'dest_compute': 'compute20',
'dest_host': '5.6.7.8',
'status': 'Done',
'instance_uuid': 'instance_id_456',
'old_instance_type_id': 5,
'new_instance_type_id': 6,
'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'deleted_at': None,
'deleted': False
}
]
migrations_obj = base.obj_make_list(
'fake-context',
migration.MigrationList(),
migration.Migration,
fake_migrations)
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
GET = {}
class MigrationsTestCase(test.NoDBTestCase):
def setUp(self):
"""Run before each test."""
super(MigrationsTestCase, self).setUp()
self.controller = migrations.MigrationsController()
self.context = context.get_admin_context()
self.req = FakeRequest()
self.req.environ['nova.context'] = self.context
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
def test_index(self):
migrations_in_progress = {
'migrations': migrations.output(migrations_obj)}
for mig in migrations_in_progress['migrations']:
self.assertTrue('id' in mig)
self.assertTrue('deleted' not in mig)
self.assertTrue('deleted_at' not in mig)
filters = {'host': 'host1', 'status': 'migrating',
'cell_name': 'ChildCell'}
self.req.GET = filters
self.mox.StubOutWithMock(self.controller.compute_api,
"get_migrations")
self.controller.compute_api.get_migrations(
self.context, filters).AndReturn(migrations_obj)
self.mox.ReplayAll()
response = self.controller.index(self.req)
self.assertEqual(migrations_in_progress, response)
def test_index_needs_authorization(self):
user_context = context.RequestContext(user_id=None,
project_id=None,
is_admin=False,
read_deleted="no",
overwrite=False)
self.req.environ['nova.context'] = user_context
self.assertRaises(exception.PolicyNotAuthorized, self.controller.index,
self.req)
| eharney/nova | nova/tests/api/openstack/compute/plugins/v3/test_migrations.py | Python | apache-2.0 | 3,995 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, softwar
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.rich_text_components import base
class Link(base.BaseRichTextComponent):
"""A rich-text component for displaying links."""
name = 'Link'
category = 'Basic Input'
description = 'A link to a URL.'
frontend_name = 'link'
tooltip = 'Insert link'
_customization_arg_specs = [{
'name': 'url',
'description': (
'The link URL. If no protocol is specified, HTTPS will be used.'),
'schema': {
'type': 'custom',
'obj_type': 'SanitizedUrl',
},
'default_value': 'https://www.example.com',
}, {
'name': 'text',
'description': (
'The link text. If left blank, the link URL will be used.'),
'schema': {
'type': 'unicode',
},
'default_value': '',
}]
| MaximLich/oppia | extensions/rich_text_components/Link/Link.py | Python | apache-2.0 | 1,451 |
import os
from django.conf import settings
from nose.tools import ok_
import mkt.site.tests
from mkt.developers.utils import check_upload
from mkt.site.storage_utils import local_storage, private_storage
from mkt.site.tests.test_utils_ import get_image_path
class TestCheckUpload(mkt.site.tests.TestCase, mkt.site.tests.MktPaths):
def test_upload_type_not_recognized(self):
with self.assertRaises(ValueError):
check_upload([], 'graphic', 'image/jpg')
def test_icon_ok(self):
with local_storage.open(get_image_path('mozilla-sq.png')) as f:
errors, upload_hash = check_upload(f, 'icon', 'image/png')
ok_(not errors)
ok_(upload_hash)
tmp_img_path = os.path.join(settings.TMP_PATH, 'icon',
upload_hash)
ok_(private_storage.exists(tmp_img_path))
def test_icon_too_small(self):
with local_storage.open(get_image_path('mkt_icon_72.png')) as f:
errors, upload_hash = check_upload(f, 'icon', 'image/png')
ok_(errors)
ok_(upload_hash)
tmp_img_path = os.path.join(settings.TMP_PATH, 'icon',
upload_hash)
ok_(private_storage.exists(tmp_img_path))
def test_preview_ok(self):
with local_storage.open(get_image_path('preview.jpg')) as f:
errors, upload_hash = check_upload(f, 'preview', 'image/png')
ok_(not errors)
ok_(upload_hash)
tmp_img_path = os.path.join(settings.TMP_PATH, 'preview',
upload_hash)
ok_(private_storage.exists(tmp_img_path))
def test_preview_too_small(self):
with local_storage.open(get_image_path('mkt_icon_72.png')) as f:
errors, upload_hash = check_upload(f, 'preview', 'image/png')
ok_(errors)
ok_(upload_hash)
tmp_img_path = os.path.join(settings.TMP_PATH, 'preview',
upload_hash)
ok_(private_storage.exists(tmp_img_path))
def test_promo_img_ok(self):
with local_storage.open(get_image_path('game_1050.jpg')) as f:
errors, upload_hash = check_upload(f, 'promo_img', 'image/png')
ok_(not errors)
ok_(upload_hash)
tmp_img_path = os.path.join(settings.TMP_PATH, 'promo_img',
upload_hash)
ok_(private_storage.exists(tmp_img_path))
def test_promo_img_too_small(self):
with local_storage.open(get_image_path('preview.jpg')) as f:
errors, upload_hash = check_upload(f, 'promo_img', 'image/png')
ok_(errors)
ok_(upload_hash)
tmp_img_path = os.path.join(settings.TMP_PATH, 'promo_img',
upload_hash)
ok_(private_storage.exists(tmp_img_path))
| ddurst/zamboni | mkt/developers/tests/test_utils_.py | Python | bsd-3-clause | 2,956 |
"""
PhysicsWalker.py is for avatars.
A walker control such as this one provides:
- creation of the collision nodes
- handling the keyboard and mouse input for avatar movement
- moving the avatar
it does not:
- play sounds
- play animations
although it does send messeges that allow a listener to play sounds or
animations based on walker events.
"""
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase.InputStateGlobal import inputState
from direct.task.Task import Task
from pandac.PandaModules import *
import math
#import LineStream
class PhysicsWalker(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory("PhysicsWalker")
wantDebugIndicator = base.config.GetBool('want-avatar-physics-indicator', 0)
wantAvatarPhysicsIndicator = base.config.GetBool('want-avatar-physics-indicator', 0)
useLifter = 0
useHeightRay = 0
# special methods
def __init__(self, gravity = -32.1740, standableGround=0.707,
hardLandingForce=16.0):
assert self.debugPrint(
"PhysicsWalker(gravity=%s, standableGround=%s)"%(
gravity, standableGround))
DirectObject.DirectObject.__init__(self)
self.__gravity=gravity
self.__standableGround=standableGround
self.__hardLandingForce=hardLandingForce
self.needToDeltaPos = 0
self.physVelocityIndicator=None
self.avatarControlForwardSpeed=0
self.avatarControlJumpForce=0
self.avatarControlReverseSpeed=0
self.avatarControlRotateSpeed=0
self.__oldAirborneHeight=None
self.getAirborneHeight=None
self.__oldContact=None
self.__oldPosDelta=Vec3(0)
self.__oldDt=0
self.__speed=0.0
self.__rotationSpeed=0.0
self.__slideSpeed=0.0
self.__vel=Vec3(0.0)
self.collisionsActive = 0
self.isAirborne = 0
self.highMark = 0
"""
def spawnTest(self):
assert self.debugPrint("\n\nspawnTest()\n")
if not self.wantDebugIndicator:
return
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.coghq import MovingPlatform
if hasattr(self, "platform"):
# Remove the prior instantiation:
self.moveIval.pause()
del self.moveIval
self.platform.destroy()
del self.platform
model = loader.loadModel('phase_9/models/cogHQ/platform1')
fakeId = id(self)
self.platform = MovingPlatform.MovingPlatform()
self.platform.setupCopyModel(fakeId, model, 'platformcollision')
self.platformRoot = render.attachNewNode("physicsWalker-spawnTest-%s"%fakeId)
self.platformRoot.setPos(base.localAvatar, Vec3(0.0, 3.0, 1.0))
self.platformRoot.setHpr(base.localAvatar, Vec3.zero())
self.platform.reparentTo(self.platformRoot)
startPos = Vec3(0.0, -15.0, 0.0)
endPos = Vec3(0.0, 15.0, 0.0)
distance = Vec3(startPos-endPos).length()
duration = distance/4
self.moveIval = Sequence(
WaitInterval(0.3),
LerpPosInterval(self.platform, duration,
endPos, startPos=startPos,
name='platformOut%s' % fakeId,
fluid = 1),
WaitInterval(0.3),
LerpPosInterval(self.platform, duration,
startPos, startPos=endPos,
name='platformBack%s' % fakeId,
fluid = 1),
name='platformIval%s' % fakeId,
)
self.moveIval.loop()
"""
def setWalkSpeed(self, forward, jump, reverse, rotate):
assert self.debugPrint("setWalkSpeed()")
self.avatarControlForwardSpeed=forward
self.avatarControlJumpForce=jump
self.avatarControlReverseSpeed=reverse
self.avatarControlRotateSpeed=rotate
def getSpeeds(self):
#assert self.debugPrint("getSpeeds()")
return (self.__speed, self.__rotationSpeed)
def setAvatar(self, avatar):
self.avatar = avatar
if avatar is not None:
self.setupPhysics(avatar)
def setupRay(self, floorBitmask, floorOffset):
# This is a ray cast from your head down to detect floor polygons
# A toon is about 4.0 feet high, so start it there
self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
cRayNode = CollisionNode('PW.cRayNode')
cRayNode.addSolid(self.cRay)
self.cRayNodePath = self.avatarNodePath.attachNewNode(cRayNode)
self.cRayBitMask = floorBitmask
cRayNode.setFromCollideMask(self.cRayBitMask)
cRayNode.setIntoCollideMask(BitMask32.allOff())
if self.useLifter:
# set up floor collision mechanism
self.lifter = CollisionHandlerFloor()
self.lifter.setInPattern("enter%in")
self.lifter.setOutPattern("exit%in")
self.lifter.setOffset(floorOffset)
# Limit our rate-of-fall with the lifter.
# If this is too low, we actually "fall" off steep stairs
# and float above them as we go down. I increased this
# from 8.0 to 16.0 to prevent this
#self.lifter.setMaxVelocity(16.0)
#self.bobNodePath = self.avatarNodePath.attachNewNode("bob")
#self.lifter.addCollider(self.cRayNodePath, self.cRayNodePath)
self.lifter.addCollider(self.cRayNodePath, self.avatarNodePath)
else: # useCollisionHandlerQueue
self.cRayQueue = CollisionHandlerQueue()
self.cTrav.addCollider(self.cRayNodePath, self.cRayQueue)
def determineHeight(self):
"""
returns the height of the avatar above the ground.
If there is no floor below the avatar, 0.0 is returned.
aka get airborne height.
"""
if self.useLifter:
height = self.avatarNodePath.getPos(self.cRayNodePath)
# If the shadow where not pointed strait down, we would need to
# get magnitude of the vector. Since it is strait down, we'll
# just get the z:
#spammy --> assert self.debugPrint("getAirborneHeight() returning %s"%(height.getZ(),))
assert onScreenDebug.add("height", height.getZ())
return height.getZ() - self.floorOffset
else: # useCollisionHandlerQueue
"""
returns the height of the avatar above the ground.
If there is no floor below the avatar, 0.0 is returned.
aka get airborne height.
"""
height = 0.0
#*#self.cRayTrav.traverse(render)
if self.cRayQueue.getNumEntries() != 0:
# We have a floor.
# Choose the highest of the possibly several floors we're over:
self.cRayQueue.sortEntries()
floorPoint = self.cRayQueue.getEntry(0).getFromIntersectionPoint()
height = -floorPoint.getZ()
self.cRayQueue.clearEntries()
if __debug__:
onScreenDebug.add("height", height)
return height
def setupSphere(self, bitmask, avatarRadius):
"""
Set up the collision sphere
"""
# This is a sphere on the ground to detect barrier collisions
self.avatarRadius = avatarRadius
centerHeight = avatarRadius
if self.useHeightRay:
centerHeight *= 2.0
self.cSphere = CollisionSphere(0.0, 0.0, centerHeight, avatarRadius)
cSphereNode = CollisionNode('PW.cSphereNode')
cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
self.cSphereBitMask = bitmask
cSphereNode.setFromCollideMask(self.cSphereBitMask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
# set up collision mechanism
self.pusher = PhysicsCollisionHandler()
self.pusher.setInPattern("enter%in")
self.pusher.setOutPattern("exit%in")
self.pusher.addCollider(self.cSphereNodePath, self.avatarNodePath)
def setupPhysics(self, avatarNodePath):
assert self.debugPrint("setupPhysics()")
# Connect to Physics Manager:
self.actorNode=ActorNode("PW physicsActor")
self.actorNode.getPhysicsObject().setOriented(1)
self.actorNode.getPhysical(0).setViscosity(0.1)
physicsActor=NodePath(self.actorNode)
avatarNodePath.reparentTo(physicsActor)
avatarNodePath.assign(physicsActor)
self.phys=PhysicsManager()
fn=ForceNode("gravity")
fnp=NodePath(fn)
#fnp.reparentTo(physicsActor)
fnp.reparentTo(render)
gravity=LinearVectorForce(0.0, 0.0, self.__gravity)
fn.addForce(gravity)
self.phys.addLinearForce(gravity)
self.gravity = gravity
fn=ForceNode("priorParent")
fnp=NodePath(fn)
fnp.reparentTo(render)
priorParent=LinearVectorForce(0.0, 0.0, 0.0)
fn.addForce(priorParent)
self.phys.addLinearForce(priorParent)
self.priorParentNp = fnp
self.priorParent = priorParent
fn=ForceNode("viscosity")
fnp=NodePath(fn)
#fnp.reparentTo(physicsActor)
fnp.reparentTo(render)
self.avatarViscosity=LinearFrictionForce(0.0, 1.0, 0)
#self.avatarViscosity.setCoef(0.9)
fn.addForce(self.avatarViscosity)
self.phys.addLinearForce(self.avatarViscosity)
self.phys.attachLinearIntegrator(LinearEulerIntegrator())
self.phys.attachPhysicalNode(physicsActor.node())
self.acForce=LinearVectorForce(0.0, 0.0, 0.0)
fn=ForceNode("avatarControls")
fnp=NodePath(fn)
fnp.reparentTo(render)
fn.addForce(self.acForce)
self.phys.addLinearForce(self.acForce)
#self.phys.removeLinearForce(self.acForce)
#fnp.remove()
return avatarNodePath
def initializeCollisions(self, collisionTraverser, avatarNodePath,
wallBitmask, floorBitmask,
avatarRadius = 1.4, floorOffset = 1.0, reach = 1.0):
"""
Set up the avatar collisions
"""
assert self.debugPrint("initializeCollisions()")
assert not avatarNodePath.isEmpty()
self.cTrav = collisionTraverser
self.floorOffset = floorOffset = 7.0
self.avatarNodePath = self.setupPhysics(avatarNodePath)
if 0 or self.useHeightRay:
#self.setupRay(floorBitmask, avatarRadius)
self.setupRay(floorBitmask, 0.0)
self.setupSphere(wallBitmask|floorBitmask, avatarRadius)
self.setCollisionsActive(1)
def setAirborneHeightFunc(self, getAirborneHeight):
self.getAirborneHeight = getAirborneHeight
def setAvatarPhysicsIndicator(self, indicator):
"""
indicator is a NodePath
"""
assert self.debugPrint("setAvatarPhysicsIndicator()")
self.cSphereNodePath.show()
if indicator:
# Indicator Node:
change=render.attachNewNode("change")
#change.setPos(Vec3(1.0, 1.0, 1.0))
#change.setHpr(0.0, 0.0, 0.0)
change.setScale(0.1)
#change.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
indicator.reparentTo(change)
indicatorNode=render.attachNewNode("physVelocityIndicator")
#indicatorNode.setScale(0.1)
#indicatorNode.setP(90.0)
indicatorNode.setPos(self.avatarNodePath, 0.0, 0.0, 6.0)
indicatorNode.setColor(0.0, 0.0, 1.0, 1.0)
change.reparentTo(indicatorNode)
self.physVelocityIndicator=indicatorNode
# Contact Node:
contactIndicatorNode=render.attachNewNode("physContactIndicator")
contactIndicatorNode.setScale(0.25)
contactIndicatorNode.setP(90.0)
contactIndicatorNode.setPos(self.avatarNodePath, 0.0, 0.0, 5.0)
contactIndicatorNode.setColor(1.0, 0.0, 0.0, 1.0)
indicator.instanceTo(contactIndicatorNode)
self.physContactIndicator=contactIndicatorNode
else:
print "failed load of physics indicator"
def avatarPhysicsIndicator(self, task):
#assert self.debugPrint("avatarPhysicsIndicator()")
# Velocity:
self.physVelocityIndicator.setPos(self.avatarNodePath, 0.0, 0.0, 6.0)
physObject=self.actorNode.getPhysicsObject()
a=physObject.getVelocity()
self.physVelocityIndicator.setScale(math.sqrt(a.length()))
a+=self.physVelocityIndicator.getPos()
self.physVelocityIndicator.lookAt(Point3(a))
# Contact:
contact=self.actorNode.getContactVector()
if contact==Vec3.zero():
self.physContactIndicator.hide()
else:
self.physContactIndicator.show()
self.physContactIndicator.setPos(self.avatarNodePath, 0.0, 0.0, 5.0)
#contact=self.actorNode.getContactVector()
point=Point3(contact+self.physContactIndicator.getPos())
self.physContactIndicator.lookAt(point)
return Task.cont
def deleteCollisions(self):
assert self.debugPrint("deleteCollisions()")
del self.cTrav
if self.useHeightRay:
del self.cRayQueue
self.cRayNodePath.removeNode()
del self.cRayNodePath
del self.cSphere
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.pusher
del self.getAirborneHeight
def setCollisionsActive(self, active = 1):
assert self.debugPrint("collisionsActive(active=%s)"%(active,))
if self.collisionsActive != active:
self.collisionsActive = active
if active:
self.cTrav.addCollider(self.cSphereNodePath, self.pusher)
if self.useHeightRay:
if self.useLifter:
self.cTrav.addCollider(self.cRayNodePath, self.lifter)
else:
self.cTrav.addCollider(self.cRayNodePath, self.cRayQueue)
else:
self.cTrav.removeCollider(self.cSphereNodePath)
if self.useHeightRay:
self.cTrav.removeCollider(self.cRayNodePath)
# Now that we have disabled collisions, make one more pass
# right now to ensure we aren't standing in a wall.
self.oneTimeCollide()
def getCollisionsActive(self):
assert self.debugPrint(
"getCollisionsActive() returning=%s"%(
self.collisionsActive,))
return self.collisionsActive
def placeOnFloor(self):
"""
Make a reasonable effort to place the avatar on the ground.
For example, this is useful when switching away from the
current walker.
"""
self.oneTimeCollide()
self.avatarNodePath.setZ(self.avatarNodePath.getZ()-self.getAirborneHeight())
def oneTimeCollide(self):
"""
Makes one quick collision pass for the avatar, for instance as
a one-time straighten-things-up operation after collisions
have been disabled.
"""
assert self.debugPrint("oneTimeCollide()")
tempCTrav = CollisionTraverser("oneTimeCollide")
if self.useHeightRay:
if self.useLifter:
tempCTrav.addCollider(self.cRayNodePath, self.lifter)
else:
tempCTrav.addCollider(self.cRayNodePath, self.cRayQueue)
tempCTrav.traverse(render)
def addBlastForce(self, vector):
pass
def displayDebugInfo(self):
"""
For debug use.
"""
onScreenDebug.add("w controls", "PhysicsWalker")
if self.useLifter:
onScreenDebug.add("w airborneHeight", self.lifter.getAirborneHeight())
onScreenDebug.add("w isOnGround", self.lifter.isOnGround())
#onScreenDebug.add("w gravity", self.lifter.getGravity())
onScreenDebug.add("w contact normal", self.lifter.getContactNormal().pPrintValues())
onScreenDebug.add("w impact", self.lifter.getImpactVelocity())
onScreenDebug.add("w velocity", self.lifter.getVelocity())
onScreenDebug.add("w hasContact", self.lifter.hasContact())
#onScreenDebug.add("w falling", self.falling)
#onScreenDebug.add("w jumpForce", self.avatarControlJumpForce)
#onScreenDebug.add("w mayJump", self.mayJump)
onScreenDebug.add("w isAirborne", self.isAirborne)
def handleAvatarControls(self, task):
"""
Check on the arrow keys and update the avatar.
"""
if __debug__:
if self.wantDebugIndicator:
onScreenDebug.append("localAvatar pos = %s\n"%(base.localAvatar.getPos().pPrintValues(),))
onScreenDebug.append("localAvatar h = % 10.4f\n"%(base.localAvatar.getH(),))
onScreenDebug.append("localAvatar anim = %s\n"%(base.localAvatar.animFSM.getCurrentState().getName(),))
#assert self.debugPrint("handleAvatarControls(task=%s)"%(task,))
physObject=self.actorNode.getPhysicsObject()
#rotAvatarToPhys=Mat3.rotateMatNormaxis(-self.avatarNodePath.getH(), Vec3.up())
#rotPhysToAvatar=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
contact=self.actorNode.getContactVector()
# hack fix for falling through the floor:
if contact==Vec3.zero() and self.avatarNodePath.getZ()<-50.0:
# DCR: don't reset X and Y; allow player to move
self.reset()
self.avatarNodePath.setZ(50.0)
messenger.send("walkerIsOutOfWorld", [self.avatarNodePath])
if self.wantDebugIndicator:
self.displayDebugInfo()
# get the button states:
forward = inputState.isSet("forward")
reverse = inputState.isSet("reverse")
turnLeft = inputState.isSet("turnLeft")
turnRight = inputState.isSet("turnRight")
slide = 0#inputState.isSet("slide")
slideLeft = 0#inputState.isSet("slideLeft")
slideRight = 0#inputState.isSet("slideRight")
jump = inputState.isSet("jump")
# Check for Auto-Run
if base.localAvatar.getAutoRun():
forward = 1
reverse = 0
# Determine what the speeds are based on the buttons:
self.__speed=(forward and self.avatarControlForwardSpeed or
reverse and -self.avatarControlReverseSpeed)
avatarSlideSpeed=self.avatarControlForwardSpeed*0.5
#self.__slideSpeed=slide and (
# (turnLeft and -avatarSlideSpeed) or
# (turnRight and avatarSlideSpeed))
self.__slideSpeed=(
(slideLeft and -avatarSlideSpeed) or
(slideRight and avatarSlideSpeed))
self.__rotationSpeed=not slide and (
(turnLeft and self.avatarControlRotateSpeed) or
(turnRight and -self.avatarControlRotateSpeed))
# How far did we move based on the amount of time elapsed?
dt=ClockObject.getGlobalClock().getDt()
if self.needToDeltaPos:
self.setPriorParentVector()
self.needToDeltaPos = 0
#self.__oldPosDelta = render.getRelativeVector(
# self.avatarNodePath,
# self.avatarNodePath.getPosDelta(render))
#self.__oldPosDelta = self.avatarNodePath.getRelativeVector(
# render,
# self.avatarNodePath.getPosDelta(render))
self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
self.__oldDt = dt
#posDelta = self.avatarNodePath.getPosDelta(render)
#if posDelta==Vec3.zero():
# self.priorParent.setVector(self.__oldPosDelta)
#else:
# self.priorParent.setVector(Vec3.zero())
# # We must copy the vector to preserve it:
# self.__oldPosDelta=Vec3(posDelta)
if __debug__:
if self.wantDebugIndicator:
onScreenDebug.add("posDelta1",
self.avatarNodePath.getPosDelta(render).pPrintValues())
if 0:
onScreenDebug.add("posDelta3",
render.getRelativeVector(
self.avatarNodePath,
self.avatarNodePath.getPosDelta(render)).pPrintValues())
if 0:
onScreenDebug.add("gravity",
self.gravity.getLocalVector().pPrintValues())
onScreenDebug.add("priorParent",
self.priorParent.getLocalVector().pPrintValues())
onScreenDebug.add("avatarViscosity",
"% 10.4f"%(self.avatarViscosity.getCoef(),))
onScreenDebug.add("physObject pos",
physObject.getPosition().pPrintValues())
onScreenDebug.add("physObject hpr",
physObject.getOrientation().getHpr().pPrintValues())
onScreenDebug.add("physObject orien",
physObject.getOrientation().pPrintValues())
if 1:
onScreenDebug.add("physObject vel",
physObject.getVelocity().pPrintValues())
onScreenDebug.add("physObject len",
"% 10.4f"%physObject.getVelocity().length())
if 0:
onScreenDebug.add("posDelta4",
self.priorParentNp.getRelativeVector(
render,
self.avatarNodePath.getPosDelta(render)).pPrintValues())
if 1:
onScreenDebug.add("priorParent",
self.priorParent.getLocalVector().pPrintValues())
if 0:
onScreenDebug.add("priorParent po",
self.priorParent.getVector(physObject).pPrintValues())
if 0:
onScreenDebug.add("__posDelta",
self.__oldPosDelta.pPrintValues())
if 1:
onScreenDebug.add("contact",
contact.pPrintValues())
#onScreenDebug.add("airborneHeight", "% 10.4f"%(
# self.getAirborneHeight(),))
if 0:
onScreenDebug.add("__oldContact",
contact.pPrintValues())
onScreenDebug.add("__oldAirborneHeight", "% 10.4f"%(
self.getAirborneHeight(),))
airborneHeight=self.getAirborneHeight()
if airborneHeight > self.highMark:
self.highMark = airborneHeight
if __debug__:
onScreenDebug.add("highMark", "% 10.4f"%(self.highMark,))
#if airborneHeight < 0.1: #contact!=Vec3.zero():
if 1:
if (airborneHeight > self.avatarRadius*0.5
or physObject.getVelocity().getZ() > 0.0
): # Check stair angles before changing this.
# ...the avatar is airborne (maybe a lot or a tiny amount).
self.isAirborne = 1
else:
# ...the avatar is very close to the ground (close enough to be
# considered on the ground).
if self.isAirborne and physObject.getVelocity().getZ() <= 0.0:
# ...the avatar has landed.
contactLength = contact.length()
if contactLength>self.__hardLandingForce:
#print "jumpHardLand"
messenger.send("jumpHardLand")
else:
#print "jumpLand"
messenger.send("jumpLand")
self.priorParent.setVector(Vec3.zero())
self.isAirborne = 0
elif jump:
#print "jump"
#self.__jumpButton=0
messenger.send("jumpStart")
if 0:
# ...jump away from walls and with with the slope normal.
jumpVec=Vec3(contact+Vec3.up())
#jumpVec=Vec3(rotAvatarToPhys.xform(jumpVec))
jumpVec.normalize()
else:
# ...jump straight up, even if next to a wall.
jumpVec=Vec3.up()
jumpVec*=self.avatarControlJumpForce
physObject.addImpulse(Vec3(jumpVec))
self.isAirborne = 1 # Avoid double impulse before fully airborne.
else:
self.isAirborne = 0
if __debug__:
onScreenDebug.add("isAirborne", "%d"%(self.isAirborne,))
else:
if contact!=Vec3.zero():
# ...the avatar has touched something (but might not be on the ground).
contactLength = contact.length()
contact.normalize()
angle=contact.dot(Vec3.up())
if angle>self.__standableGround:
# ...avatar is on standable ground.
if self.__oldContact==Vec3.zero():
#if self.__oldAirborneHeight > 0.1: #self.__oldContact==Vec3.zero():
# ...avatar was airborne.
self.jumpCount-=1
if contactLength>self.__hardLandingForce:
messenger.send("jumpHardLand")
else:
messenger.send("jumpLand")
elif jump:
self.jumpCount+=1
#self.__jumpButton=0
messenger.send("jumpStart")
jump=Vec3(contact+Vec3.up())
#jump=Vec3(rotAvatarToPhys.xform(jump))
jump.normalize()
jump*=self.avatarControlJumpForce
physObject.addImpulse(Vec3(jump))
if contact!=self.__oldContact:
# We must copy the vector to preserve it:
self.__oldContact=Vec3(contact)
self.__oldAirborneHeight=airborneHeight
moveToGround = Vec3.zero()
if not self.useHeightRay or self.isAirborne:
# ...the airborne check is a hack to stop sliding.
self.phys.doPhysics(dt)
if __debug__:
onScreenDebug.add("phys", "on")
else:
physObject.setVelocity(Vec3.zero())
#if airborneHeight>0.001 and contact==Vec3.zero():
# moveToGround = Vec3(0.0, 0.0, -airborneHeight)
#moveToGround = Vec3(0.0, 0.0, -airborneHeight)
moveToGround = Vec3(0.0, 0.0, -self.determineHeight())
if __debug__:
onScreenDebug.add("phys", "off")
# Check to see if we're moving at all:
if self.__speed or self.__slideSpeed or self.__rotationSpeed or moveToGround!=Vec3.zero():
distance = dt * self.__speed
slideDistance = dt * self.__slideSpeed
rotation = dt * self.__rotationSpeed
#debugTempH=self.avatarNodePath.getH()
assert self.avatarNodePath.getQuat().isSameDirection(physObject.getOrientation())
assert self.avatarNodePath.getPos().almostEqual(physObject.getPosition(), 0.0001)
# update pos:
# Take a step in the direction of our previous heading.
self.__vel=Vec3(
Vec3.forward() * distance +
Vec3.right() * slideDistance)
# rotMat is the rotation matrix corresponding to
# our previous heading.
rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
step=rotMat.xform(self.__vel)
physObject.setPosition(Point3(
physObject.getPosition()+step+moveToGround))
# update hpr:
o=physObject.getOrientation()
r=LRotationf()
r.setHpr(Vec3(rotation, 0.0, 0.0))
physObject.setOrientation(o*r)
# sync the change:
self.actorNode.updateTransform()
assert self.avatarNodePath.getQuat().isSameDirection(physObject.getOrientation())
assert self.avatarNodePath.getPos().almostEqual(physObject.getPosition(), 0.0001)
#assert self.avatarNodePath.getH()==debugTempH-rotation
messenger.send("avatarMoving")
else:
self.__vel.set(0.0, 0.0, 0.0)
# Clear the contact vector so we can tell if we contact something next frame:
self.actorNode.setContactVector(Vec3.zero())
return Task.cont
def doDeltaPos(self):
assert self.debugPrint("doDeltaPos()")
self.needToDeltaPos = 1
def setPriorParentVector(self):
assert self.debugPrint("doDeltaPos()")
print "self.__oldDt", self.__oldDt, "self.__oldPosDelta", self.__oldPosDelta
if __debug__:
onScreenDebug.add("__oldDt", "% 10.4f"%self.__oldDt)
onScreenDebug.add("self.__oldPosDelta",
self.__oldPosDelta.pPrintValues())
velocity = self.__oldPosDelta*(1/self.__oldDt)*4.0 # *4.0 is a hack
assert self.debugPrint(" __oldPosDelta=%s"%(self.__oldPosDelta,))
assert self.debugPrint(" velocity=%s"%(velocity,))
self.priorParent.setVector(Vec3(velocity))
if __debug__:
if self.wantDebugIndicator:
onScreenDebug.add("velocity", velocity.pPrintValues())
def reset(self):
assert self.debugPrint("reset()")
self.actorNode.getPhysicsObject().resetPosition(self.avatarNodePath.getPos())
self.priorParent.setVector(Vec3.zero())
self.highMark = 0
self.actorNode.setContactVector(Vec3.zero())
if __debug__:
contact=self.actorNode.getContactVector()
onScreenDebug.add("priorParent po",
self.priorParent.getVector(self.actorNode.getPhysicsObject()).pPrintValues())
onScreenDebug.add("highMark", "% 10.4f"%(self.highMark,))
onScreenDebug.add("contact", contact.pPrintValues())
def getVelocity(self):
physObject=self.actorNode.getPhysicsObject()
return physObject.getVelocity()
def enableAvatarControls(self):
"""
Activate the arrow keys, etc.
"""
assert self.debugPrint("enableAvatarControls()")
assert self.collisionsActive
if __debug__:
#self.accept("control-f3", self.spawnTest) #*#
self.accept("f3", self.reset) # for debugging only.
taskName = "AvatarControls-%s"%(id(self),)
# remove any old
taskMgr.remove(taskName)
# spawn the new task
taskMgr.add(self.handleAvatarControls, taskName, 25)
if self.physVelocityIndicator:
taskMgr.add(self.avatarPhysicsIndicator, "AvatarControlsIndicator%s"%(id(self),), 35)
def disableAvatarControls(self):
"""
Ignore the arrow keys, etc.
"""
assert self.debugPrint("disableAvatarControls()")
taskName = "AvatarControls-%s"%(id(self),)
taskMgr.remove(taskName)
taskName = "AvatarControlsIndicator%s"%(id(self),)
taskMgr.remove(taskName)
if __debug__:
self.ignore("control-f3") #*#
self.ignore("f3")
def flushEventHandlers(self):
if hasattr(self, 'cTrav'):
if self.useLifter:
self.lifter.flush() # not currently defined or needed
self.pusher.flush()
if __debug__:
def setupAvatarPhysicsIndicator(self):
if self.wantDebugIndicator:
indicator=loader.loadModel('phase_5/models/props/dagger')
#self.walkControls.setAvatarPhysicsIndicator(indicator)
def debugPrint(self, message):
"""for debugging"""
return self.notify.debug(
str(id(self))+' '+message)
| hj3938/panda3d | direct/src/controls/PhysicsWalker.py | Python | bsd-3-clause | 32,513 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import shutil
import sys
import tempfile
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
from test_package import TestPackage
class TestPackageExecutable(TestPackage):
"""A helper class for running stand-alone executables."""
_TEST_RUNNER_RET_VAL_FILE = 'gtest_retval'
def __init__(self, adb, device, test_suite, timeout,
cleanup_test_files, tool, symbols_dir=None):
"""
Args:
adb: ADB interface the tests are using.
device: Device to run the tests.
test_suite: A specific test suite to run, empty to run all.
timeout: Timeout for each test.
cleanup_test_files: Whether or not to cleanup test files on device.
tool: Name of the Valgrind tool.
symbols_dir: Directory to put the stripped binaries.
"""
TestPackage.__init__(self, adb, device, test_suite, timeout,
cleanup_test_files, tool)
self.symbols_dir = symbols_dir
def _GetGTestReturnCode(self):
ret = None
ret_code = 1 # Assume failure if we can't find it
ret_code_file = tempfile.NamedTemporaryFile()
try:
if not self.adb.Adb().Pull(
constants.TEST_EXECUTABLE_DIR + '/' +
TestPackageExecutable._TEST_RUNNER_RET_VAL_FILE,
ret_code_file.name):
logging.critical('Unable to pull gtest ret val file %s',
ret_code_file.name)
raise ValueError
ret_code = file(ret_code_file.name).read()
ret = int(ret_code)
except ValueError:
logging.critical('Error reading gtest ret val file %s [%s]',
ret_code_file.name, ret_code)
ret = 1
return ret
def _AddNativeCoverageExports(self):
# export GCOV_PREFIX set the path for native coverage results
# export GCOV_PREFIX_STRIP indicates how many initial directory
# names to strip off the hardwired absolute paths.
# This value is calculated in buildbot.sh and
# depends on where the tree is built.
# Ex: /usr/local/google/code/chrome will become
# /code/chrome if GCOV_PREFIX_STRIP=3
try:
depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
except KeyError:
logging.info('NATIVE_COVERAGE_DEPTH_STRIP is not defined: '
'No native coverage.')
return ''
export_string = ('export GCOV_PREFIX="%s/gcov"\n' %
self.adb.GetExternalStorage())
export_string += 'export GCOV_PREFIX_STRIP=%s\n' % depth
return export_string
def ClearApplicationState(self):
"""Clear the application state."""
self.adb.KillAllBlocking(self.test_suite_basename, 30)
def GetAllTests(self):
"""Returns a list of all tests available in the test suite."""
all_tests = self.adb.RunShellCommand(
'%s %s/%s --gtest_list_tests' %
(self.tool.GetTestWrapper(),
constants.TEST_EXECUTABLE_DIR,
self.test_suite_basename))
return self._ParseGTestListTests(all_tests)
def CreateTestRunnerScript(self, gtest_filter, test_arguments):
"""Creates a test runner script and pushes to the device.
Args:
gtest_filter: A gtest_filter flag.
test_arguments: Additional arguments to pass to the test binary.
"""
tool_wrapper = self.tool.GetTestWrapper()
sh_script_file = tempfile.NamedTemporaryFile()
# We need to capture the exit status from the script since adb shell won't
# propagate to us.
sh_script_file.write('cd %s\n'
'%s'
'%s %s/%s --gtest_filter=%s %s\n'
'echo $? > %s' %
(constants.TEST_EXECUTABLE_DIR,
self._AddNativeCoverageExports(),
tool_wrapper, constants.TEST_EXECUTABLE_DIR,
self.test_suite_basename,
gtest_filter, test_arguments,
TestPackageExecutable._TEST_RUNNER_RET_VAL_FILE))
sh_script_file.flush()
cmd_helper.RunCmd(['chmod', '+x', sh_script_file.name])
self.adb.PushIfNeeded(
sh_script_file.name,
constants.TEST_EXECUTABLE_DIR + '/chrome_test_runner.sh')
logging.info('Conents of the test runner script: ')
for line in open(sh_script_file.name).readlines():
logging.info(' ' + line.rstrip())
def RunTestsAndListResults(self):
"""Runs all the tests and checks for failures.
Returns:
A TestRunResults object.
"""
args = ['adb', '-s', self.device, 'shell', 'sh',
constants.TEST_EXECUTABLE_DIR + '/chrome_test_runner.sh']
logging.info(args)
p = pexpect.spawn(args[0], args[1:], logfile=sys.stdout)
return self._WatchTestOutput(p)
def StripAndCopyExecutable(self):
"""Strips and copies the executable to the device."""
if self.tool.NeedsDebugInfo():
target_name = self.test_suite
else:
target_name = self.test_suite + '_' + self.device + '_stripped'
should_strip = True
if os.path.isfile(target_name):
logging.info('Found target file %s' % target_name)
target_mtime = os.stat(target_name).st_mtime
source_mtime = os.stat(self.test_suite).st_mtime
if target_mtime > source_mtime:
logging.info('Target mtime (%d) is newer than source (%d), assuming '
'no change.' % (target_mtime, source_mtime))
should_strip = False
if should_strip:
logging.info('Did not find up-to-date stripped binary. Generating a '
'new one (%s).' % target_name)
# Whenever we generate a stripped binary, copy to the symbols dir. If we
# aren't stripping a new binary, assume it's there.
if self.symbols_dir:
if not os.path.exists(self.symbols_dir):
os.makedirs(self.symbols_dir)
shutil.copy(self.test_suite, self.symbols_dir)
strip = os.environ['STRIP']
cmd_helper.RunCmd([strip, self.test_suite, '-o', target_name])
test_binary = constants.TEST_EXECUTABLE_DIR + '/' + self.test_suite_basename
self.adb.PushIfNeeded(target_name, test_binary)
def _GetTestSuiteBaseName(self):
"""Returns the base name of the test suite."""
return os.path.basename(self.test_suite)
| wangscript/libjingle-1 | trunk/build/android/pylib/gtest/test_package_executable.py | Python | bsd-3-clause | 6,540 |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.stanza import Iq, Message
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins.google.nosave import stanza
log = logging.getLogger(__name__)
class GoogleNoSave(BasePlugin):
"""
Google: Off the Record Chats
NOTE: This is NOT an encryption method.
Also see <https://developers.google.com/talk/jep_extensions/otr>.
"""
name = 'google_nosave'
description = 'Google: Off the Record Chats'
dependencies = set(['google_settings'])
stanza = stanza
def plugin_init(self):
register_stanza_plugin(Message, stanza.NoSave)
register_stanza_plugin(Iq, stanza.NoSaveQuery)
self.xmpp.register_handler(
Callback('Google Nosave',
StanzaPath('iq@type=set/google_nosave'),
self._handle_nosave_change))
def plugin_end(self):
self.xmpp.remove_handler('Google Nosave')
def enable(self, jid=None, block=True, timeout=None, callback=None):
if jid is None:
self.xmpp['google_settings'].update({'archiving_enabled': False},
block=block, timeout=timeout, callback=callback)
else:
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['google_nosave']['item']['jid'] = jid
iq['google_nosave']['item']['value'] = True
return iq.send(block=block, timeout=timeout, callback=callback)
def disable(self, jid=None, block=True, timeout=None, callback=None):
if jid is None:
self.xmpp['google_settings'].update({'archiving_enabled': True},
block=block, timeout=timeout, callback=callback)
else:
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['google_nosave']['item']['jid'] = jid
iq['google_nosave']['item']['value'] = False
return iq.send(block=block, timeout=timeout, callback=callback)
def get(self, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'get'
iq.enable('google_nosave')
return iq.send(block=block, timeout=timeout, callback=callback)
def _handle_nosave_change(self, iq):
reply = self.xmpp.Iq()
reply['type'] = 'result'
reply['id'] = iq['id']
reply['to'] = iq['from']
reply.send()
self.xmpp.event('google_nosave_change', iq)
| danielvdao/facebookMacBot | venv/lib/python2.7/site-packages/sleekxmpp/plugins/google/nosave/nosave.py | Python | mit | 2,759 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget.utils.simple_persistence import SimplePersistence
class TestSimplePersistence(object):
config = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self, execute_task):
task = execute_task('test')
task = task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
def test_nosession(self, execute_task):
persist = SimplePersistence('testplugin')
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it commits and actually persists
persist = SimplePersistence('testplugin')
assert persist['aoeu'] == 'test'
| jawilson/Flexget | flexget/tests/test_simple_persistence.py | Python | mit | 968 |
#!/usr/bin/python
#
# One-time tool to convert from old custom API document format to YAML.
#
# $ for i in api/*.txt; do python api2yaml.py $i ${i%%.txt}.yaml; done
#
import os
import sys
def main(f_in, f_out, funcname):
parts = {}
curr = None
partname = None
def quoted(line):
if line.strip() == '':
# Don't print whitespace indent for empty lines
f_out.write('\n')
else:
f_out.write(' %s\n' % line)
for line in f_in:
if len(line) > 0 and line[-1] == '\n':
line = line[:-1]
if len(line) > 0 and line[0] == '=':
partname = line[1:]
curr = []
parts[partname] = curr
continue
curr.append(line)
# Although the key order in the YAML output doesn't matter,
# we want it to be in a specific order to make manual edits
# nicer. Do this by emitting the YAML manually.
#print(repr(parts))
for key in parts.keys():
part = parts[key]
while len(part) > 0 and part[-1] == '':
part = part[:-1]
parts[key] = part
for key in parts.keys():
part = parts[key]
if len(part) == 0:
del parts[key]
f_out.write('name: %s\n' % funcname)
assert(parts.has_key('proto'))
f_out.write('\n')
f_out.write('proto: |\n')
for p in parts['proto']:
quoted(p)
if parts.has_key('stack'):
f_out.write('\n')
f_out.write('stack: |\n')
for p in parts['stack']:
quoted(p)
assert(parts.has_key('summary'))
f_out.write('\n')
f_out.write('summary: |\n')
for p in parts['summary']:
quoted(p)
assert(parts.has_key('example'))
f_out.write('\n')
f_out.write('example: |\n')
for p in parts['example']:
quoted(p)
if parts.has_key('tags'):
f_out.write('\n')
f_out.write('tags:\n')
for p in parts['tags']:
f_out.write(' - %s\n' % p)
if parts.has_key('seealso'):
f_out.write('\n')
f_out.write('seealso:\n')
for p in parts['seealso']:
f_out.write(' - %s\n' % p)
if parts.has_key('introduced'):
assert(len(parts['introduced']) == 1)
f_out.write('\n')
f_out.write('introduced: %s\n' % parts['introduced'][0])
if parts.has_key('fixme'):
f_out.write('fixme: |\n')
for p in parts['fixme']:
quote(p)
if __name__ == '__main__':
with open(sys.argv[1], 'rb') as f_in, \
open(sys.argv[2], 'wb') as f_out:
fn = os.path.basename(sys.argv[1])
fn_plain = os.path.splitext(fn)[0]
main(f_in, f_out, fn_plain)
| kphillisjr/duktape | website/api2yaml.py | Python | mit | 2,302 |
# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test function
:func:`iris.fileformats.grib._load_convert.product_definition_template_0`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests
import iris.coords
from iris.tests.unit.fileformats.grib.load_convert import (LoadConvertTest,
empty_metadata)
from iris.fileformats.grib._load_convert import product_definition_template_0
from iris.tests import mock
MDI = 0xffffffff
def section_4():
return {'hoursAfterDataCutoff': MDI,
'minutesAfterDataCutoff': MDI,
'indicatorOfUnitOfTimeRange': 0, # minutes
'forecastTime': 360,
'NV': 0,
'typeOfFirstFixedSurface': 103,
'scaleFactorOfFirstFixedSurface': 0,
'scaledValueOfFirstFixedSurface': 9999,
'typeOfSecondFixedSurface': 255}
class Test(LoadConvertTest):
def test_given_frt(self):
metadata = empty_metadata()
rt_coord = iris.coords.DimCoord(24, 'forecast_reference_time',
units='hours since epoch')
product_definition_template_0(section_4(), metadata, rt_coord)
expected = empty_metadata()
aux = expected['aux_coords_and_dims']
aux.append((iris.coords.DimCoord(6, 'forecast_period', units='hours'),
None))
aux.append((
iris.coords.DimCoord(30, 'time', units='hours since epoch'), None))
aux.append((rt_coord, None))
aux.append((iris.coords.DimCoord(9999, long_name='height', units='m'),
None))
self.assertMetadataEqual(metadata, expected)
def test_given_t(self):
metadata = empty_metadata()
rt_coord = iris.coords.DimCoord(24, 'time',
units='hours since epoch')
product_definition_template_0(section_4(), metadata, rt_coord)
expected = empty_metadata()
aux = expected['aux_coords_and_dims']
aux.append((iris.coords.DimCoord(6, 'forecast_period', units='hours'),
None))
aux.append((
iris.coords.DimCoord(18, 'forecast_reference_time',
units='hours since epoch'), None))
aux.append((rt_coord, None))
aux.append((iris.coords.DimCoord(9999, long_name='height', units='m'),
None))
self.assertMetadataEqual(metadata, expected)
def test_generating_process_warnings(self):
metadata = empty_metadata()
rt_coord = iris.coords.DimCoord(24, 'forecast_reference_time',
units='hours since epoch')
convert_options = iris.fileformats.grib._load_convert.options
emit_warnings = convert_options.warn_on_unsupported
try:
convert_options.warn_on_unsupported = True
with mock.patch('warnings.warn') as warn:
product_definition_template_0(section_4(), metadata, rt_coord)
warn_msgs = [call[1][0] for call in warn.mock_calls]
expected = ['Unable to translate type of generating process.',
'Unable to translate background generating process '
'identifier.',
'Unable to translate forecast generating process '
'identifier.']
self.assertEqual(warn_msgs, expected)
finally:
convert_options.warn_on_unsupported = emit_warnings
if __name__ == '__main__':
tests.main()
| decvalts/iris | lib/iris/tests/unit/fileformats/grib/load_convert/test_product_definition_template_0.py | Python | gpl-3.0 | 4,457 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Huawei
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
###############################################################################
# Documentation
###############################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hwc_vpc_eip
description:
- elastic ip management.
short_description: Creates a resource of Vpc/EIP in Huawei Cloud
version_added: '2.10'
author: Huawei Inc. (@huaweicloud)
requirements:
- keystoneauth1 >= 3.6.0
options:
state:
description:
- Whether the given object should exist in Huawei Cloud.
type: str
choices: ['present', 'absent']
default: 'present'
timeouts:
description:
- The timeouts for each operations.
type: dict
suboptions:
create:
description:
- The timeouts for create operation.
type: str
default: '5m'
update:
description:
- The timeouts for update operation.
type: str
default: '5m'
type:
description:
- Specifies the EIP type.
type: str
required: true
dedicated_bandwidth:
description:
- Specifies the dedicated bandwidth object.
type: dict
required: false
suboptions:
charge_mode:
description:
- Specifies whether the bandwidth is billed by traffic or
by bandwidth size. The value can be bandwidth or traffic.
If this parameter is left blank or is null character
string, default value bandwidth is used. For IPv6
addresses, the default parameter value is bandwidth
outside China and is traffic in China.
type: str
required: true
name:
description:
- Specifies the bandwidth name. The value is a string of 1
to 64 characters that can contain letters, digits,
underscores C(_), hyphens (-), and periods (.).
type: str
required: true
size:
description:
- Specifies the bandwidth size. The value ranges from 1
Mbit/s to 2000 Mbit/s by default. (The specific range may
vary depending on the configuration in each region. You
can see the bandwidth range of each region on the
management console.) The minimum unit for bandwidth
adjustment varies depending on the bandwidth range. The
details are as follows.
- The minimum unit is 1 Mbit/s if the allowed bandwidth
size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
included).
- The minimum unit is 50 Mbit/s if the allowed bandwidth
size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
included).
- The minimum unit is 500 Mbit/s if the allowed bandwidth
size is greater than 1000 Mbit/s.
type: int
required: true
enterprise_project_id:
description:
- Specifies the enterprise project ID.
type: str
required: false
ip_version:
description:
- The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
parameter is left blank, an IPv4 address will be assigned.
type: int
required: false
ipv4_address:
description:
- Specifies the obtained IPv4 EIP. The system automatically assigns
an EIP if you do not specify it.
type: str
required: false
port_id:
description:
- Specifies the port ID. This parameter is returned only when a
private IP address is bound with the EIP.
type: str
required: false
shared_bandwidth_id:
description:
- Specifies the ID of shared bandwidth.
type: str
required: false
extends_documentation_fragment: hwc
'''
EXAMPLES = '''
# create an eip and bind it to a port
- name: create vpc
hwc_network_vpc:
cidr: "192.168.100.0/24"
name: "ansible_network_vpc_test"
register: vpc
- name: create subnet
hwc_vpc_subnet:
gateway_ip: "192.168.100.32"
name: "ansible_network_subnet_test"
dhcp_enable: True
vpc_id: "{{ vpc.id }}"
cidr: "192.168.100.0/26"
register: subnet
- name: create a port
hwc_vpc_port:
subnet_id: "{{ subnet.id }}"
ip_address: "192.168.100.33"
register: port
- name: create an eip and bind it to a port
hwc_vpc_eip:
type: "5_bgp"
dedicated_bandwidth:
charge_mode: "traffic"
name: "ansible_test_dedicated_bandwidth"
size: 1
port_id: "{{ port.id }}"
'''
RETURN = '''
type:
description:
- Specifies the EIP type.
type: str
returned: success
dedicated_bandwidth:
description:
- Specifies the dedicated bandwidth object.
type: dict
returned: success
contains:
charge_mode:
description:
- Specifies whether the bandwidth is billed by traffic or
by bandwidth size. The value can be bandwidth or traffic.
If this parameter is left blank or is null character
string, default value bandwidth is used. For IPv6
addresses, the default parameter value is bandwidth
outside China and is traffic in China.
type: str
returned: success
name:
description:
- Specifies the bandwidth name. The value is a string of 1
to 64 characters that can contain letters, digits,
underscores C(_), hyphens (-), and periods (.).
type: str
returned: success
size:
description:
- Specifies the bandwidth size. The value ranges from 1
Mbit/s to 2000 Mbit/s by default. (The specific range may
vary depending on the configuration in each region. You
can see the bandwidth range of each region on the
management console.) The minimum unit for bandwidth
adjustment varies depending on the bandwidth range. The
details are as follows:.
- The minimum unit is 1 Mbit/s if the allowed bandwidth
size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
included).
- The minimum unit is 50 Mbit/s if the allowed bandwidth
size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
included).
- The minimum unit is 500 Mbit/s if the allowed bandwidth
size is greater than 1000 Mbit/s.
type: int
returned: success
id:
description:
- Specifies the ID of dedicated bandwidth.
type: str
returned: success
enterprise_project_id:
description:
- Specifies the enterprise project ID.
type: str
returned: success
ip_version:
description:
- The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
parameter is left blank, an IPv4 address will be assigned.
type: int
returned: success
ipv4_address:
description:
- Specifies the obtained IPv4 EIP. The system automatically assigns
an EIP if you do not specify it.
type: str
returned: success
port_id:
description:
- Specifies the port ID. This parameter is returned only when a
private IP address is bound with the EIP.
type: str
returned: success
shared_bandwidth_id:
description:
- Specifies the ID of shared bandwidth.
type: str
returned: success
create_time:
description:
- Specifies the time (UTC time) when the EIP was assigned.
type: str
returned: success
ipv6_address:
description:
- Specifies the obtained IPv6 EIP.
type: str
returned: success
private_ip_address:
description:
- Specifies the private IP address bound with the EIP. This
parameter is returned only when a private IP address is bound
with the EIP.
type: str
returned: success
'''
from ansible.module_utils.hwc_utils import (
Config, HwcClientException, HwcClientException404, HwcModule,
are_different_dicts, build_path, get_region, is_empty_value,
navigate_value, wait_to_finish)
def build_module():
return HwcModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'],
type='str'),
timeouts=dict(type='dict', options=dict(
create=dict(default='5m', type='str'),
update=dict(default='5m', type='str'),
), default=dict()),
type=dict(type='str', required=True),
dedicated_bandwidth=dict(type='dict', options=dict(
charge_mode=dict(type='str', required=True),
name=dict(type='str', required=True),
size=dict(type='int', required=True)
)),
enterprise_project_id=dict(type='str'),
ip_version=dict(type='int'),
ipv4_address=dict(type='str'),
port_id=dict(type='str'),
shared_bandwidth_id=dict(type='str')
),
supports_check_mode=True,
)
def main():
"""Main function"""
module = build_module()
config = Config(module, "vpc")
try:
resource = None
if module.params['id']:
resource = True
else:
v = search_resource(config)
if len(v) > 1:
raise Exception("Found more than one resource(%s)" % ", ".join([
navigate_value(i, ["id"]) for i in v]))
if len(v) == 1:
resource = v[0]
module.params['id'] = navigate_value(resource, ["id"])
result = {}
changed = False
if module.params['state'] == 'present':
if resource is None:
if not module.check_mode:
create(config)
changed = True
current = read_resource(config, exclude_output=True)
expect = user_input_parameters(module)
if are_different_dicts(expect, current):
if not module.check_mode:
update(config)
changed = True
result = read_resource(config)
result['id'] = module.params.get('id')
else:
if resource:
if not module.check_mode:
delete(config)
changed = True
except Exception as ex:
module.fail_json(msg=str(ex))
else:
result['changed'] = changed
module.exit_json(**result)
def user_input_parameters(module):
return {
"dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
"enterprise_project_id": module.params.get("enterprise_project_id"),
"ip_version": module.params.get("ip_version"),
"ipv4_address": module.params.get("ipv4_address"),
"port_id": module.params.get("port_id"),
"shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
"type": module.params.get("type"),
}
def create(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
opts = user_input_parameters(module)
params = build_create_parameters(opts)
r = send_create_request(module, params, client)
obj = async_wait_create(config, r, client, timeout)
module.params['id'] = navigate_value(obj, ["publicip", "id"])
def update(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
opts = user_input_parameters(module)
params = build_update_parameters(opts)
if params:
r = send_update_request(module, params, client)
async_wait_update(config, r, client, timeout)
def delete(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
if module.params["port_id"]:
module.params["port_id"] = ""
update(config)
send_delete_request(module, None, client)
url = build_path(module, "publicips/{id}")
def _refresh_status():
try:
client.get(url)
except HwcClientException404:
return True, "Done"
except Exception:
return None, ""
return True, "Pending"
timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
try:
wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
except Exception as ex:
module.fail_json(msg="module(hwc_vpc_eip): error "
"waiting for api(delete) to "
"be done, error= %s" % str(ex))
def read_resource(config, exclude_output=False):
module = config.module
client = config.client(get_region(module), "vpc", "project")
res = {}
r = send_read_request(module, client)
res["read"] = fill_read_resp_body(r)
return update_properties(module, res, None, exclude_output)
def _build_query_link(opts):
query_params = []
v = navigate_value(opts, ["ip_version"])
if v:
query_params.append("ip_version=" + str(v))
v = navigate_value(opts, ["enterprise_project_id"])
if v:
query_params.append("enterprise_project_id=" + str(v))
query_link = "?marker={marker}&limit=10"
if query_params:
query_link += "&" + "&".join(query_params)
return query_link
def search_resource(config):
module = config.module
client = config.client(get_region(module), "vpc", "project")
opts = user_input_parameters(module)
identity_obj = _build_identity_object(opts)
query_link = _build_query_link(opts)
link = "publicips" + query_link
result = []
p = {'marker': ''}
while True:
url = link.format(**p)
r = send_list_request(module, client, url)
if not r:
break
for item in r:
item = fill_list_resp_body(item)
if not are_different_dicts(identity_obj, item):
result.append(item)
if len(result) > 1:
break
p['marker'] = r[-1].get('id')
return result
def build_create_parameters(opts):
params = dict()
v = expand_create_bandwidth(opts, None)
if not is_empty_value(v):
params["bandwidth"] = v
v = navigate_value(opts, ["enterprise_project_id"], None)
if not is_empty_value(v):
params["enterprise_project_id"] = v
v = expand_create_publicip(opts, None)
if not is_empty_value(v):
params["publicip"] = v
return params
def expand_create_bandwidth(d, array_index):
v = navigate_value(d, ["dedicated_bandwidth"], array_index)
sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
if v and sbwid:
raise Exception("don't input shared_bandwidth_id and "
"dedicated_bandwidth at same time")
if not (v or sbwid):
raise Exception("must input shared_bandwidth_id or "
"dedicated_bandwidth")
if sbwid:
return {
"id": sbwid,
"share_type": "WHOLE"}
return {
"charge_mode": v["charge_mode"],
"name": v["name"],
"share_type": "PER",
"size": v["size"]}
def expand_create_publicip(d, array_index):
r = dict()
v = navigate_value(d, ["ipv4_address"], array_index)
if not is_empty_value(v):
r["ip_address"] = v
v = navigate_value(d, ["ip_version"], array_index)
if not is_empty_value(v):
r["ip_version"] = v
v = navigate_value(d, ["type"], array_index)
if not is_empty_value(v):
r["type"] = v
return r
def send_create_request(module, params, client):
url = "publicips"
try:
r = client.post(url, params)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(create), error: %s" % str(ex))
module.fail_json(msg=msg)
return r
def async_wait_create(config, result, client, timeout):
module = config.module
path_parameters = {
"publicip_id": ["publicip", "id"],
}
data = dict((key, navigate_value(result, path))
for key, path in path_parameters.items())
url = build_path(module, "publicips/{publicip_id}", data)
def _query_status():
r = None
try:
r = client.get(url, timeout=timeout)
except HwcClientException:
return None, ""
try:
s = navigate_value(r, ["publicip", "status"])
return r, s
except Exception:
return None, ""
try:
return wait_to_finish(
["ACTIVE", "DOWN"],
None,
_query_status, timeout)
except Exception as ex:
module.fail_json(msg="module(hwc_vpc_eip): error "
"waiting for api(create) to "
"be done, error= %s" % str(ex))
def build_update_parameters(opts):
params = dict()
v = navigate_value(opts, ["ip_version"], None)
if not is_empty_value(v):
params["ip_version"] = v
v = navigate_value(opts, ["port_id"], None)
if v is not None:
params["port_id"] = v
if not params:
return params
params = {"publicip": params}
return params
def send_update_request(module, params, client):
url = build_path(module, "publicips/{id}")
try:
r = client.put(url, params)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(update), error: %s" % str(ex))
module.fail_json(msg=msg)
return r
def async_wait_update(config, result, client, timeout):
module = config.module
url = build_path(module, "publicips/{id}")
def _query_status():
r = None
try:
r = client.get(url, timeout=timeout)
except HwcClientException:
return None, ""
try:
s = navigate_value(r, ["publicip", "status"])
return r, s
except Exception:
return None, ""
try:
return wait_to_finish(
["ACTIVE", "DOWN"],
None,
_query_status, timeout)
except Exception as ex:
module.fail_json(msg="module(hwc_vpc_eip): error "
"waiting for api(update) to "
"be done, error= %s" % str(ex))
def send_delete_request(module, params, client):
url = build_path(module, "publicips/{id}")
try:
r = client.delete(url, params)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(delete), error: %s" % str(ex))
module.fail_json(msg=msg)
return r
def send_read_request(module, client):
url = build_path(module, "publicips/{id}")
r = None
try:
r = client.get(url)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(read), error: %s" % str(ex))
module.fail_json(msg=msg)
return navigate_value(r, ["publicip"], None)
def fill_read_resp_body(body):
result = dict()
result["bandwidth_id"] = body.get("bandwidth_id")
result["bandwidth_name"] = body.get("bandwidth_name")
result["bandwidth_share_type"] = body.get("bandwidth_share_type")
result["bandwidth_size"] = body.get("bandwidth_size")
result["create_time"] = body.get("create_time")
result["enterprise_project_id"] = body.get("enterprise_project_id")
result["id"] = body.get("id")
result["ip_version"] = body.get("ip_version")
result["port_id"] = body.get("port_id")
result["private_ip_address"] = body.get("private_ip_address")
result["public_ip_address"] = body.get("public_ip_address")
result["public_ipv6_address"] = body.get("public_ipv6_address")
result["status"] = body.get("status")
result["tenant_id"] = body.get("tenant_id")
result["type"] = body.get("type")
return result
def update_properties(module, response, array_index, exclude_output=False):
r = user_input_parameters(module)
if not exclude_output:
v = navigate_value(response, ["read", "create_time"], array_index)
r["create_time"] = v
v = r.get("dedicated_bandwidth")
v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
r["dedicated_bandwidth"] = v
v = navigate_value(response, ["read", "enterprise_project_id"],
array_index)
r["enterprise_project_id"] = v
v = navigate_value(response, ["read", "ip_version"], array_index)
r["ip_version"] = v
v = navigate_value(response, ["read", "public_ip_address"], array_index)
r["ipv4_address"] = v
if not exclude_output:
v = navigate_value(response, ["read", "public_ipv6_address"],
array_index)
r["ipv6_address"] = v
v = navigate_value(response, ["read", "port_id"], array_index)
r["port_id"] = v
if not exclude_output:
v = navigate_value(response, ["read", "private_ip_address"],
array_index)
r["private_ip_address"] = v
v = r.get("shared_bandwidth_id")
v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
r["shared_bandwidth_id"] = v
v = navigate_value(response, ["read", "type"], array_index)
r["type"] = v
return r
def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
if not (v and v == "PER"):
return current_value
result = current_value
if not result:
result = dict()
if not exclude_output:
v = navigate_value(d, ["read", "bandwidth_id"], array_index)
if v is not None:
result["id"] = v
v = navigate_value(d, ["read", "bandwidth_name"], array_index)
if v is not None:
result["name"] = v
v = navigate_value(d, ["read", "bandwidth_size"], array_index)
if v is not None:
result["size"] = v
return result if result else current_value
def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
v = navigate_value(d, ["read", "bandwidth_id"], array_index)
v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
return v if (v1 and v1 == "WHOLE") else current_value
def send_list_request(module, client, url):
r = None
try:
r = client.get(url)
except HwcClientException as ex:
msg = ("module(hwc_vpc_eip): error running "
"api(list), error: %s" % str(ex))
module.fail_json(msg=msg)
return navigate_value(r, ["publicips"], None)
def _build_identity_object(all_opts):
result = dict()
v = expand_list_bandwidth_id(all_opts, None)
result["bandwidth_id"] = v
v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None)
result["bandwidth_name"] = v
result["bandwidth_share_type"] = None
v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None)
result["bandwidth_size"] = v
result["create_time"] = None
v = navigate_value(all_opts, ["enterprise_project_id"], None)
result["enterprise_project_id"] = v
result["id"] = None
v = navigate_value(all_opts, ["ip_version"], None)
result["ip_version"] = v
v = navigate_value(all_opts, ["port_id"], None)
result["port_id"] = v
result["private_ip_address"] = None
v = navigate_value(all_opts, ["ipv4_address"], None)
result["public_ip_address"] = v
result["public_ipv6_address"] = None
result["status"] = None
result["tenant_id"] = None
v = navigate_value(all_opts, ["type"], None)
result["type"] = v
return result
def expand_list_bandwidth_id(d, array_index):
v = navigate_value(d, ["dedicated_bandwidth"], array_index)
sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
if v and sbwid:
raise Exception("don't input shared_bandwidth_id and "
"dedicated_bandwidth at same time")
return sbwid
def fill_list_resp_body(body):
result = dict()
result["bandwidth_id"] = body.get("bandwidth_id")
result["bandwidth_name"] = body.get("bandwidth_name")
result["bandwidth_share_type"] = body.get("bandwidth_share_type")
result["bandwidth_size"] = body.get("bandwidth_size")
result["create_time"] = body.get("create_time")
result["enterprise_project_id"] = body.get("enterprise_project_id")
result["id"] = body.get("id")
result["ip_version"] = body.get("ip_version")
result["port_id"] = body.get("port_id")
result["private_ip_address"] = body.get("private_ip_address")
result["public_ip_address"] = body.get("public_ip_address")
result["public_ipv6_address"] = body.get("public_ipv6_address")
result["status"] = body.get("status")
result["tenant_id"] = body.get("tenant_id")
result["type"] = body.get("type")
return result
if __name__ == '__main__':
main()
| simonwydooghe/ansible | lib/ansible/modules/cloud/huawei/hwc_vpc_eip.py | Python | gpl-3.0 | 26,658 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import itertools
import operator
import warnings
import pretend
import pytest
from packaging.version import Version, LegacyVersion, InvalidVersion, parse
@pytest.mark.parametrize(
("version", "klass"), [("1.0", Version), ("1-1-1", LegacyVersion)]
)
def test_parse(version, klass):
assert isinstance(parse(version), klass)
# This list must be in the correct sorting order
VERSIONS = [
# Implicit epoch of 0
"1.0.dev456",
"1.0a1",
"1.0a2.dev456",
"1.0a12.dev456",
"1.0a12",
"1.0b1.dev456",
"1.0b2",
"1.0b2.post345.dev456",
"1.0b2.post345",
"1.0b2-346",
"1.0c1.dev456",
"1.0c1",
"1.0rc2",
"1.0c3",
"1.0",
"1.0.post456.dev34",
"1.0.post456",
"1.1.dev1",
"1.2+123abc",
"1.2+123abc456",
"1.2+abc",
"1.2+abc123",
"1.2+abc123def",
"1.2+1234.abc",
"1.2+123456",
"1.2.r32+123456",
"1.2.rev33+123456",
# Explicit epoch of 1
"1!1.0.dev456",
"1!1.0a1",
"1!1.0a2.dev456",
"1!1.0a12.dev456",
"1!1.0a12",
"1!1.0b1.dev456",
"1!1.0b2",
"1!1.0b2.post345.dev456",
"1!1.0b2.post345",
"1!1.0b2-346",
"1!1.0c1.dev456",
"1!1.0c1",
"1!1.0rc2",
"1!1.0c3",
"1!1.0",
"1!1.0.post456.dev34",
"1!1.0.post456",
"1!1.1.dev1",
"1!1.2+123abc",
"1!1.2+123abc456",
"1!1.2+abc",
"1!1.2+abc123",
"1!1.2+abc123def",
"1!1.2+1234.abc",
"1!1.2+123456",
"1!1.2.r32+123456",
"1!1.2.rev33+123456",
]
class TestVersion:
@pytest.mark.parametrize("version", VERSIONS)
def test_valid_versions(self, version):
Version(version)
@pytest.mark.parametrize(
"version",
[
# Non sensical versions should be invalid
"french toast",
# Versions with invalid local versions
"1.0+a+",
"1.0++",
"1.0+_foobar",
"1.0+foo&asd",
"1.0+1+1",
],
)
def test_invalid_versions(self, version):
with pytest.raises(InvalidVersion):
Version(version)
@pytest.mark.parametrize(
("version", "normalized"),
[
# Various development release incarnations
("1.0dev", "1.0.dev0"),
("1.0.dev", "1.0.dev0"),
("1.0dev1", "1.0.dev1"),
("1.0dev", "1.0.dev0"),
("1.0-dev", "1.0.dev0"),
("1.0-dev1", "1.0.dev1"),
("1.0DEV", "1.0.dev0"),
("1.0.DEV", "1.0.dev0"),
("1.0DEV1", "1.0.dev1"),
("1.0DEV", "1.0.dev0"),
("1.0.DEV1", "1.0.dev1"),
("1.0-DEV", "1.0.dev0"),
("1.0-DEV1", "1.0.dev1"),
# Various alpha incarnations
("1.0a", "1.0a0"),
("1.0.a", "1.0a0"),
("1.0.a1", "1.0a1"),
("1.0-a", "1.0a0"),
("1.0-a1", "1.0a1"),
("1.0alpha", "1.0a0"),
("1.0.alpha", "1.0a0"),
("1.0.alpha1", "1.0a1"),
("1.0-alpha", "1.0a0"),
("1.0-alpha1", "1.0a1"),
("1.0A", "1.0a0"),
("1.0.A", "1.0a0"),
("1.0.A1", "1.0a1"),
("1.0-A", "1.0a0"),
("1.0-A1", "1.0a1"),
("1.0ALPHA", "1.0a0"),
("1.0.ALPHA", "1.0a0"),
("1.0.ALPHA1", "1.0a1"),
("1.0-ALPHA", "1.0a0"),
("1.0-ALPHA1", "1.0a1"),
# Various beta incarnations
("1.0b", "1.0b0"),
("1.0.b", "1.0b0"),
("1.0.b1", "1.0b1"),
("1.0-b", "1.0b0"),
("1.0-b1", "1.0b1"),
("1.0beta", "1.0b0"),
("1.0.beta", "1.0b0"),
("1.0.beta1", "1.0b1"),
("1.0-beta", "1.0b0"),
("1.0-beta1", "1.0b1"),
("1.0B", "1.0b0"),
("1.0.B", "1.0b0"),
("1.0.B1", "1.0b1"),
("1.0-B", "1.0b0"),
("1.0-B1", "1.0b1"),
("1.0BETA", "1.0b0"),
("1.0.BETA", "1.0b0"),
("1.0.BETA1", "1.0b1"),
("1.0-BETA", "1.0b0"),
("1.0-BETA1", "1.0b1"),
# Various release candidate incarnations
("1.0c", "1.0rc0"),
("1.0.c", "1.0rc0"),
("1.0.c1", "1.0rc1"),
("1.0-c", "1.0rc0"),
("1.0-c1", "1.0rc1"),
("1.0rc", "1.0rc0"),
("1.0.rc", "1.0rc0"),
("1.0.rc1", "1.0rc1"),
("1.0-rc", "1.0rc0"),
("1.0-rc1", "1.0rc1"),
("1.0C", "1.0rc0"),
("1.0.C", "1.0rc0"),
("1.0.C1", "1.0rc1"),
("1.0-C", "1.0rc0"),
("1.0-C1", "1.0rc1"),
("1.0RC", "1.0rc0"),
("1.0.RC", "1.0rc0"),
("1.0.RC1", "1.0rc1"),
("1.0-RC", "1.0rc0"),
("1.0-RC1", "1.0rc1"),
# Various post release incarnations
("1.0post", "1.0.post0"),
("1.0.post", "1.0.post0"),
("1.0post1", "1.0.post1"),
("1.0post", "1.0.post0"),
("1.0-post", "1.0.post0"),
("1.0-post1", "1.0.post1"),
("1.0POST", "1.0.post0"),
("1.0.POST", "1.0.post0"),
("1.0POST1", "1.0.post1"),
("1.0POST", "1.0.post0"),
("1.0r", "1.0.post0"),
("1.0rev", "1.0.post0"),
("1.0.POST1", "1.0.post1"),
("1.0.r1", "1.0.post1"),
("1.0.rev1", "1.0.post1"),
("1.0-POST", "1.0.post0"),
("1.0-POST1", "1.0.post1"),
("1.0-5", "1.0.post5"),
("1.0-r5", "1.0.post5"),
("1.0-rev5", "1.0.post5"),
# Local version case insensitivity
("1.0+AbC", "1.0+abc"),
# Integer Normalization
("1.01", "1.1"),
("1.0a05", "1.0a5"),
("1.0b07", "1.0b7"),
("1.0c056", "1.0rc56"),
("1.0rc09", "1.0rc9"),
("1.0.post000", "1.0.post0"),
("1.1.dev09000", "1.1.dev9000"),
("00!1.2", "1.2"),
("0100!0.0", "100!0.0"),
# Various other normalizations
("v1.0", "1.0"),
(" v1.0\t\n", "1.0"),
],
)
def test_normalized_versions(self, version, normalized):
assert str(Version(version)) == normalized
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0.dev456", "1.0.dev456"),
("1.0a1", "1.0a1"),
("1.0a2.dev456", "1.0a2.dev456"),
("1.0a12.dev456", "1.0a12.dev456"),
("1.0a12", "1.0a12"),
("1.0b1.dev456", "1.0b1.dev456"),
("1.0b2", "1.0b2"),
("1.0b2.post345.dev456", "1.0b2.post345.dev456"),
("1.0b2.post345", "1.0b2.post345"),
("1.0rc1.dev456", "1.0rc1.dev456"),
("1.0rc1", "1.0rc1"),
("1.0", "1.0"),
("1.0.post456.dev34", "1.0.post456.dev34"),
("1.0.post456", "1.0.post456"),
("1.0.1", "1.0.1"),
("0!1.0.2", "1.0.2"),
("1.0.3+7", "1.0.3+7"),
("0!1.0.4+8.0", "1.0.4+8.0"),
("1.0.5+9.5", "1.0.5+9.5"),
("1.2+1234.abc", "1.2+1234.abc"),
("1.2+123456", "1.2+123456"),
("1.2+123abc", "1.2+123abc"),
("1.2+123abc456", "1.2+123abc456"),
("1.2+abc", "1.2+abc"),
("1.2+abc123", "1.2+abc123"),
("1.2+abc123def", "1.2+abc123def"),
("1.1.dev1", "1.1.dev1"),
("7!1.0.dev456", "7!1.0.dev456"),
("7!1.0a1", "7!1.0a1"),
("7!1.0a2.dev456", "7!1.0a2.dev456"),
("7!1.0a12.dev456", "7!1.0a12.dev456"),
("7!1.0a12", "7!1.0a12"),
("7!1.0b1.dev456", "7!1.0b1.dev456"),
("7!1.0b2", "7!1.0b2"),
("7!1.0b2.post345.dev456", "7!1.0b2.post345.dev456"),
("7!1.0b2.post345", "7!1.0b2.post345"),
("7!1.0rc1.dev456", "7!1.0rc1.dev456"),
("7!1.0rc1", "7!1.0rc1"),
("7!1.0", "7!1.0"),
("7!1.0.post456.dev34", "7!1.0.post456.dev34"),
("7!1.0.post456", "7!1.0.post456"),
("7!1.0.1", "7!1.0.1"),
("7!1.0.2", "7!1.0.2"),
("7!1.0.3+7", "7!1.0.3+7"),
("7!1.0.4+8.0", "7!1.0.4+8.0"),
("7!1.0.5+9.5", "7!1.0.5+9.5"),
("7!1.1.dev1", "7!1.1.dev1"),
],
)
def test_version_str_repr(self, version, expected):
assert str(Version(version)) == expected
assert repr(Version(version)) == "<Version({0})>".format(repr(expected))
def test_version_rc_and_c_equals(self):
assert Version("1.0rc1") == Version("1.0c1")
@pytest.mark.parametrize("version", VERSIONS)
def test_version_hash(self, version):
assert hash(Version(version)) == hash(Version(version))
@pytest.mark.parametrize(
("version", "public"),
[
("1.0", "1.0"),
("1.0.dev0", "1.0.dev0"),
("1.0.dev6", "1.0.dev6"),
("1.0a1", "1.0a1"),
("1.0a1.post5", "1.0a1.post5"),
("1.0a1.post5.dev6", "1.0a1.post5.dev6"),
("1.0rc4", "1.0rc4"),
("1.0.post5", "1.0.post5"),
("1!1.0", "1!1.0"),
("1!1.0.dev6", "1!1.0.dev6"),
("1!1.0a1", "1!1.0a1"),
("1!1.0a1.post5", "1!1.0a1.post5"),
("1!1.0a1.post5.dev6", "1!1.0a1.post5.dev6"),
("1!1.0rc4", "1!1.0rc4"),
("1!1.0.post5", "1!1.0.post5"),
("1.0+deadbeef", "1.0"),
("1.0.dev6+deadbeef", "1.0.dev6"),
("1.0a1+deadbeef", "1.0a1"),
("1.0a1.post5+deadbeef", "1.0a1.post5"),
("1.0a1.post5.dev6+deadbeef", "1.0a1.post5.dev6"),
("1.0rc4+deadbeef", "1.0rc4"),
("1.0.post5+deadbeef", "1.0.post5"),
("1!1.0+deadbeef", "1!1.0"),
("1!1.0.dev6+deadbeef", "1!1.0.dev6"),
("1!1.0a1+deadbeef", "1!1.0a1"),
("1!1.0a1.post5+deadbeef", "1!1.0a1.post5"),
("1!1.0a1.post5.dev6+deadbeef", "1!1.0a1.post5.dev6"),
("1!1.0rc4+deadbeef", "1!1.0rc4"),
("1!1.0.post5+deadbeef", "1!1.0.post5"),
],
)
def test_version_public(self, version, public):
assert Version(version).public == public
@pytest.mark.parametrize(
("version", "base_version"),
[
("1.0", "1.0"),
("1.0.dev0", "1.0"),
("1.0.dev6", "1.0"),
("1.0a1", "1.0"),
("1.0a1.post5", "1.0"),
("1.0a1.post5.dev6", "1.0"),
("1.0rc4", "1.0"),
("1.0.post5", "1.0"),
("1!1.0", "1!1.0"),
("1!1.0.dev6", "1!1.0"),
("1!1.0a1", "1!1.0"),
("1!1.0a1.post5", "1!1.0"),
("1!1.0a1.post5.dev6", "1!1.0"),
("1!1.0rc4", "1!1.0"),
("1!1.0.post5", "1!1.0"),
("1.0+deadbeef", "1.0"),
("1.0.dev6+deadbeef", "1.0"),
("1.0a1+deadbeef", "1.0"),
("1.0a1.post5+deadbeef", "1.0"),
("1.0a1.post5.dev6+deadbeef", "1.0"),
("1.0rc4+deadbeef", "1.0"),
("1.0.post5+deadbeef", "1.0"),
("1!1.0+deadbeef", "1!1.0"),
("1!1.0.dev6+deadbeef", "1!1.0"),
("1!1.0a1+deadbeef", "1!1.0"),
("1!1.0a1.post5+deadbeef", "1!1.0"),
("1!1.0a1.post5.dev6+deadbeef", "1!1.0"),
("1!1.0rc4+deadbeef", "1!1.0"),
("1!1.0.post5+deadbeef", "1!1.0"),
],
)
def test_version_base_version(self, version, base_version):
assert Version(version).base_version == base_version
@pytest.mark.parametrize(
("version", "epoch"),
[
("1.0", 0),
("1.0.dev0", 0),
("1.0.dev6", 0),
("1.0a1", 0),
("1.0a1.post5", 0),
("1.0a1.post5.dev6", 0),
("1.0rc4", 0),
("1.0.post5", 0),
("1!1.0", 1),
("1!1.0.dev6", 1),
("1!1.0a1", 1),
("1!1.0a1.post5", 1),
("1!1.0a1.post5.dev6", 1),
("1!1.0rc4", 1),
("1!1.0.post5", 1),
("1.0+deadbeef", 0),
("1.0.dev6+deadbeef", 0),
("1.0a1+deadbeef", 0),
("1.0a1.post5+deadbeef", 0),
("1.0a1.post5.dev6+deadbeef", 0),
("1.0rc4+deadbeef", 0),
("1.0.post5+deadbeef", 0),
("1!1.0+deadbeef", 1),
("1!1.0.dev6+deadbeef", 1),
("1!1.0a1+deadbeef", 1),
("1!1.0a1.post5+deadbeef", 1),
("1!1.0a1.post5.dev6+deadbeef", 1),
("1!1.0rc4+deadbeef", 1),
("1!1.0.post5+deadbeef", 1),
],
)
def test_version_epoch(self, version, epoch):
assert Version(version).epoch == epoch
@pytest.mark.parametrize(
("version", "release"),
[
("1.0", (1, 0)),
("1.0.dev0", (1, 0)),
("1.0.dev6", (1, 0)),
("1.0a1", (1, 0)),
("1.0a1.post5", (1, 0)),
("1.0a1.post5.dev6", (1, 0)),
("1.0rc4", (1, 0)),
("1.0.post5", (1, 0)),
("1!1.0", (1, 0)),
("1!1.0.dev6", (1, 0)),
("1!1.0a1", (1, 0)),
("1!1.0a1.post5", (1, 0)),
("1!1.0a1.post5.dev6", (1, 0)),
("1!1.0rc4", (1, 0)),
("1!1.0.post5", (1, 0)),
("1.0+deadbeef", (1, 0)),
("1.0.dev6+deadbeef", (1, 0)),
("1.0a1+deadbeef", (1, 0)),
("1.0a1.post5+deadbeef", (1, 0)),
("1.0a1.post5.dev6+deadbeef", (1, 0)),
("1.0rc4+deadbeef", (1, 0)),
("1.0.post5+deadbeef", (1, 0)),
("1!1.0+deadbeef", (1, 0)),
("1!1.0.dev6+deadbeef", (1, 0)),
("1!1.0a1+deadbeef", (1, 0)),
("1!1.0a1.post5+deadbeef", (1, 0)),
("1!1.0a1.post5.dev6+deadbeef", (1, 0)),
("1!1.0rc4+deadbeef", (1, 0)),
("1!1.0.post5+deadbeef", (1, 0)),
],
)
def test_version_release(self, version, release):
assert Version(version).release == release
@pytest.mark.parametrize(
("version", "local"),
[
("1.0", None),
("1.0.dev0", None),
("1.0.dev6", None),
("1.0a1", None),
("1.0a1.post5", None),
("1.0a1.post5.dev6", None),
("1.0rc4", None),
("1.0.post5", None),
("1!1.0", None),
("1!1.0.dev6", None),
("1!1.0a1", None),
("1!1.0a1.post5", None),
("1!1.0a1.post5.dev6", None),
("1!1.0rc4", None),
("1!1.0.post5", None),
("1.0+deadbeef", "deadbeef"),
("1.0.dev6+deadbeef", "deadbeef"),
("1.0a1+deadbeef", "deadbeef"),
("1.0a1.post5+deadbeef", "deadbeef"),
("1.0a1.post5.dev6+deadbeef", "deadbeef"),
("1.0rc4+deadbeef", "deadbeef"),
("1.0.post5+deadbeef", "deadbeef"),
("1!1.0+deadbeef", "deadbeef"),
("1!1.0.dev6+deadbeef", "deadbeef"),
("1!1.0a1+deadbeef", "deadbeef"),
("1!1.0a1.post5+deadbeef", "deadbeef"),
("1!1.0a1.post5.dev6+deadbeef", "deadbeef"),
("1!1.0rc4+deadbeef", "deadbeef"),
("1!1.0.post5+deadbeef", "deadbeef"),
],
)
def test_version_local(self, version, local):
assert Version(version).local == local
@pytest.mark.parametrize(
("version", "pre"),
[
("1.0", None),
("1.0.dev0", None),
("1.0.dev6", None),
("1.0a1", ("a", 1)),
("1.0a1.post5", ("a", 1)),
("1.0a1.post5.dev6", ("a", 1)),
("1.0rc4", ("rc", 4)),
("1.0.post5", None),
("1!1.0", None),
("1!1.0.dev6", None),
("1!1.0a1", ("a", 1)),
("1!1.0a1.post5", ("a", 1)),
("1!1.0a1.post5.dev6", ("a", 1)),
("1!1.0rc4", ("rc", 4)),
("1!1.0.post5", None),
("1.0+deadbeef", None),
("1.0.dev6+deadbeef", None),
("1.0a1+deadbeef", ("a", 1)),
("1.0a1.post5+deadbeef", ("a", 1)),
("1.0a1.post5.dev6+deadbeef", ("a", 1)),
("1.0rc4+deadbeef", ("rc", 4)),
("1.0.post5+deadbeef", None),
("1!1.0+deadbeef", None),
("1!1.0.dev6+deadbeef", None),
("1!1.0a1+deadbeef", ("a", 1)),
("1!1.0a1.post5+deadbeef", ("a", 1)),
("1!1.0a1.post5.dev6+deadbeef", ("a", 1)),
("1!1.0rc4+deadbeef", ("rc", 4)),
("1!1.0.post5+deadbeef", None),
],
)
def test_version_pre(self, version, pre):
assert Version(version).pre == pre
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0.dev0", True),
("1.0.dev1", True),
("1.0a1.dev1", True),
("1.0b1.dev1", True),
("1.0c1.dev1", True),
("1.0rc1.dev1", True),
("1.0a1", True),
("1.0b1", True),
("1.0c1", True),
("1.0rc1", True),
("1.0a1.post1.dev1", True),
("1.0b1.post1.dev1", True),
("1.0c1.post1.dev1", True),
("1.0rc1.post1.dev1", True),
("1.0a1.post1", True),
("1.0b1.post1", True),
("1.0c1.post1", True),
("1.0rc1.post1", True),
("1.0", False),
("1.0+dev", False),
("1.0.post1", False),
("1.0.post1+dev", False),
],
)
def test_version_is_prerelease(self, version, expected):
assert Version(version).is_prerelease is expected
@pytest.mark.parametrize(
("version", "dev"),
[
("1.0", None),
("1.0.dev0", 0),
("1.0.dev6", 6),
("1.0a1", None),
("1.0a1.post5", None),
("1.0a1.post5.dev6", 6),
("1.0rc4", None),
("1.0.post5", None),
("1!1.0", None),
("1!1.0.dev6", 6),
("1!1.0a1", None),
("1!1.0a1.post5", None),
("1!1.0a1.post5.dev6", 6),
("1!1.0rc4", None),
("1!1.0.post5", None),
("1.0+deadbeef", None),
("1.0.dev6+deadbeef", 6),
("1.0a1+deadbeef", None),
("1.0a1.post5+deadbeef", None),
("1.0a1.post5.dev6+deadbeef", 6),
("1.0rc4+deadbeef", None),
("1.0.post5+deadbeef", None),
("1!1.0+deadbeef", None),
("1!1.0.dev6+deadbeef", 6),
("1!1.0a1+deadbeef", None),
("1!1.0a1.post5+deadbeef", None),
("1!1.0a1.post5.dev6+deadbeef", 6),
("1!1.0rc4+deadbeef", None),
("1!1.0.post5+deadbeef", None),
],
)
def test_version_dev(self, version, dev):
assert Version(version).dev == dev
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0", False),
("1.0.dev0", True),
("1.0.dev6", True),
("1.0a1", False),
("1.0a1.post5", False),
("1.0a1.post5.dev6", True),
("1.0rc4", False),
("1.0.post5", False),
("1!1.0", False),
("1!1.0.dev6", True),
("1!1.0a1", False),
("1!1.0a1.post5", False),
("1!1.0a1.post5.dev6", True),
("1!1.0rc4", False),
("1!1.0.post5", False),
("1.0+deadbeef", False),
("1.0.dev6+deadbeef", True),
("1.0a1+deadbeef", False),
("1.0a1.post5+deadbeef", False),
("1.0a1.post5.dev6+deadbeef", True),
("1.0rc4+deadbeef", False),
("1.0.post5+deadbeef", False),
("1!1.0+deadbeef", False),
("1!1.0.dev6+deadbeef", True),
("1!1.0a1+deadbeef", False),
("1!1.0a1.post5+deadbeef", False),
("1!1.0a1.post5.dev6+deadbeef", True),
("1!1.0rc4+deadbeef", False),
("1!1.0.post5+deadbeef", False),
],
)
def test_version_is_devrelease(self, version, expected):
assert Version(version).is_devrelease is expected
@pytest.mark.parametrize(
("version", "post"),
[
("1.0", None),
("1.0.dev0", None),
("1.0.dev6", None),
("1.0a1", None),
("1.0a1.post5", 5),
("1.0a1.post5.dev6", 5),
("1.0rc4", None),
("1.0.post5", 5),
("1!1.0", None),
("1!1.0.dev6", None),
("1!1.0a1", None),
("1!1.0a1.post5", 5),
("1!1.0a1.post5.dev6", 5),
("1!1.0rc4", None),
("1!1.0.post5", 5),
("1.0+deadbeef", None),
("1.0.dev6+deadbeef", None),
("1.0a1+deadbeef", None),
("1.0a1.post5+deadbeef", 5),
("1.0a1.post5.dev6+deadbeef", 5),
("1.0rc4+deadbeef", None),
("1.0.post5+deadbeef", 5),
("1!1.0+deadbeef", None),
("1!1.0.dev6+deadbeef", None),
("1!1.0a1+deadbeef", None),
("1!1.0a1.post5+deadbeef", 5),
("1!1.0a1.post5.dev6+deadbeef", 5),
("1!1.0rc4+deadbeef", None),
("1!1.0.post5+deadbeef", 5),
],
)
def test_version_post(self, version, post):
assert Version(version).post == post
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0.dev1", False),
("1.0", False),
("1.0+foo", False),
("1.0.post1.dev1", True),
("1.0.post1", True),
],
)
def test_version_is_postrelease(self, version, expected):
assert Version(version).is_postrelease is expected
@pytest.mark.parametrize(
("left", "right", "op"),
# Below we'll generate every possible combination of VERSIONS that
# should be True for the given operator
itertools.chain(
*
# Verify that the less than (<) operator works correctly
[
[(x, y, operator.lt) for y in VERSIONS[i + 1 :]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the less than equal (<=) operator works correctly
[
[(x, y, operator.le) for y in VERSIONS[i:]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the equal (==) operator works correctly
[[(x, x, operator.eq) for x in VERSIONS]]
+
# Verify that the not equal (!=) operator works correctly
[
[(x, y, operator.ne) for j, y in enumerate(VERSIONS) if i != j]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the greater than equal (>=) operator works correctly
[
[(x, y, operator.ge) for y in VERSIONS[: i + 1]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the greater than (>) operator works correctly
[
[(x, y, operator.gt) for y in VERSIONS[:i]]
for i, x in enumerate(VERSIONS)
]
),
)
def test_comparison_true(self, left, right, op):
assert op(Version(left), Version(right))
@pytest.mark.parametrize(
("left", "right", "op"),
# Below we'll generate every possible combination of VERSIONS that
# should be False for the given operator
itertools.chain(
*
# Verify that the less than (<) operator works correctly
[
[(x, y, operator.lt) for y in VERSIONS[: i + 1]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the less than equal (<=) operator works correctly
[
[(x, y, operator.le) for y in VERSIONS[:i]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the equal (==) operator works correctly
[
[(x, y, operator.eq) for j, y in enumerate(VERSIONS) if i != j]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the not equal (!=) operator works correctly
[[(x, x, operator.ne) for x in VERSIONS]]
+
# Verify that the greater than equal (>=) operator works correctly
[
[(x, y, operator.ge) for y in VERSIONS[i + 1 :]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the greater than (>) operator works correctly
[
[(x, y, operator.gt) for y in VERSIONS[i:]]
for i, x in enumerate(VERSIONS)
]
),
)
def test_comparison_false(self, left, right, op):
assert not op(Version(left), Version(right))
@pytest.mark.parametrize("op", ["lt", "le", "eq", "ge", "gt", "ne"])
def test_dunder_op_returns_notimplemented(self, op):
method = getattr(Version, "__{0}__".format(op))
assert method(Version("1"), 1) is NotImplemented
@pytest.mark.parametrize(("op", "expected"), [("eq", False), ("ne", True)])
def test_compare_other(self, op, expected):
other = pretend.stub(**{"__{0}__".format(op): lambda other: NotImplemented})
assert getattr(operator, op)(Version("1"), other) is expected
def test_compare_legacyversion_version(self):
result = sorted([Version("0"), LegacyVersion("1")])
assert result == [LegacyVersion("1"), Version("0")]
def test_major_version(self):
assert Version("2.1.0").major == 2
def test_minor_version(self):
assert Version("2.1.0").minor == 1
assert Version("2").minor == 0
def test_micro_version(self):
assert Version("2.1.3").micro == 3
assert Version("2.1").micro == 0
assert Version("2").micro == 0
LEGACY_VERSIONS = ["foobar", "a cat is fine too", "lolwut", "1-0", "2.0-a1"]
class TestLegacyVersion:
def test_legacy_version_is_deprecated(self):
with warnings.catch_warnings(record=True) as w:
LegacyVersion("some-legacy-version")
assert len(w) == 1
assert issubclass(w[0].category, DeprecationWarning)
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_valid_legacy_versions(self, version):
LegacyVersion(version)
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_str_repr(self, version):
assert str(LegacyVersion(version)) == version
assert repr(LegacyVersion(version)) == "<LegacyVersion({0})>".format(
repr(version)
)
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_hash(self, version):
assert hash(LegacyVersion(version)) == hash(LegacyVersion(version))
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_public(self, version):
assert LegacyVersion(version).public == version
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_base_version(self, version):
assert LegacyVersion(version).base_version == version
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_epoch(self, version):
assert LegacyVersion(version).epoch == -1
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_release(self, version):
assert LegacyVersion(version).release is None
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_local(self, version):
assert LegacyVersion(version).local is None
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_pre(self, version):
assert LegacyVersion(version).pre is None
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_is_prerelease(self, version):
assert not LegacyVersion(version).is_prerelease
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_dev(self, version):
assert LegacyVersion(version).dev is None
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_is_devrelease(self, version):
assert not LegacyVersion(version).is_devrelease
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_post(self, version):
assert LegacyVersion(version).post is None
@pytest.mark.parametrize("version", VERSIONS + LEGACY_VERSIONS)
def test_legacy_version_is_postrelease(self, version):
assert not LegacyVersion(version).is_postrelease
@pytest.mark.parametrize(
("left", "right", "op"),
# Below we'll generate every possible combination of
# VERSIONS + LEGACY_VERSIONS that should be True for the given operator
itertools.chain(
*
# Verify that the equal (==) operator works correctly
[[(x, x, operator.eq) for x in VERSIONS + LEGACY_VERSIONS]]
+
# Verify that the not equal (!=) operator works correctly
[
[
(x, y, operator.ne)
for j, y in enumerate(VERSIONS + LEGACY_VERSIONS)
if i != j
]
for i, x in enumerate(VERSIONS + LEGACY_VERSIONS)
]
),
)
def test_comparison_true(self, left, right, op):
assert op(LegacyVersion(left), LegacyVersion(right))
@pytest.mark.parametrize(
("left", "right", "op"),
# Below we'll generate every possible combination of
# VERSIONS + LEGACY_VERSIONS that should be False for the given
# operator
itertools.chain(
*
# Verify that the equal (==) operator works correctly
[
[
(x, y, operator.eq)
for j, y in enumerate(VERSIONS + LEGACY_VERSIONS)
if i != j
]
for i, x in enumerate(VERSIONS + LEGACY_VERSIONS)
]
+
# Verify that the not equal (!=) operator works correctly
[[(x, x, operator.ne) for x in VERSIONS + LEGACY_VERSIONS]]
),
)
def test_comparison_false(self, left, right, op):
assert not op(LegacyVersion(left), LegacyVersion(right))
@pytest.mark.parametrize("op", ["lt", "le", "eq", "ge", "gt", "ne"])
def test_dunder_op_returns_notimplemented(self, op):
method = getattr(LegacyVersion, "__{0}__".format(op))
assert method(LegacyVersion("1"), 1) is NotImplemented
@pytest.mark.parametrize(("op", "expected"), [("eq", False), ("ne", True)])
def test_compare_other(self, op, expected):
other = pretend.stub(**{"__{0}__".format(op): lambda other: NotImplemented})
assert getattr(operator, op)(LegacyVersion("1"), other) is expected
| CYBAI/servo | tests/wpt/web-platform-tests/tools/third_party/packaging/tests/test_version.py | Python | mpl-2.0 | 31,863 |
import numpy as np
import matplotlib.pyplot as plt
mu, sigma = 2, 0.5
v = np.random.normal(mu, sigma, 10000)
plt.hist(v, bins=50, density=1)
plt.show()
<caret> | smmribeiro/intellij-community | python/testData/codeInsight/mlcompletion/prev2calls/assignmentVisitorTwoDifferentPackages.py | Python | apache-2.0 | 159 |
"""task_duration
Revision ID: 2e541a1dcfed
Revises: 1b38cef5b76e
Create Date: 2015-10-28 20:38:41.266143
"""
# revision identifiers, used by Alembic.
revision = '2e541a1dcfed'
down_revision = '1b38cef5b76e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column('duration',
existing_type=mysql.INTEGER(display_width=11),
type_=sa.Float(),
existing_nullable=True)
def downgrade():
pass
| mtustin-handy/airflow | airflow/migrations/versions/2e541a1dcfed_task_duration.py | Python | apache-2.0 | 718 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ldb_monitor
short_description: Configure server load balancing health monitors in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and ldb_monitor category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ldb_monitor:
description:
- Configure server load balancing health monitors.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
http_get:
description:
- URL used to send a GET request to check the health of an HTTP server.
type: str
http_match:
description:
- String to match the value expected in response to an HTTP-GET request.
type: str
http_max_redirects:
description:
- The maximum number of HTTP redirects to be allowed (0 - 5).
type: int
interval:
description:
- Time between health checks (5 - 65635 sec).
type: int
name:
description:
- Monitor name.
required: true
type: str
port:
description:
- Service port used to perform the health check. If 0, health check monitor inherits port configured for the server (0 - 65635).
type: int
retry:
description:
- Number health check attempts before the server is considered down (1 - 255).
type: int
timeout:
description:
- Time to wait to receive response to a health check from a server. Reaching the timeout means the health check failed (1 - 255 sec).
type: int
type:
description:
- Select the Monitor type used by the health check monitor to check the health of the server (PING | TCP | HTTP).
type: str
choices:
- ping
- tcp
- http
- passive-sip
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure server load balancing health monitors.
fortios_firewall_ldb_monitor:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ldb_monitor:
http_get: "<your_own_value>"
http_match: "<your_own_value>"
http_max_redirects: "5"
interval: "6"
name: "default_name_7"
port: "8"
retry: "9"
timeout: "10"
type: "ping"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ldb_monitor_data(json):
option_list = ['http_get', 'http_match', 'http_max_redirects',
'interval', 'name', 'port',
'retry', 'timeout', 'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_ldb_monitor(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_ldb_monitor'] and data['firewall_ldb_monitor']:
state = data['firewall_ldb_monitor']['state']
else:
state = True
firewall_ldb_monitor_data = data['firewall_ldb_monitor']
filtered_data = underscore_to_hyphen(filter_firewall_ldb_monitor_data(firewall_ldb_monitor_data))
if state == "present":
return fos.set('firewall',
'ldb-monitor',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'ldb-monitor',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_ldb_monitor']:
resp = firewall_ldb_monitor(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_ldb_monitor": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"http_get": {"required": False, "type": "str"},
"http_match": {"required": False, "type": "str"},
"http_max_redirects": {"required": False, "type": "int"},
"interval": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"port": {"required": False, "type": "int"},
"retry": {"required": False, "type": "int"},
"timeout": {"required": False, "type": "int"},
"type": {"required": False, "type": "str",
"choices": ["ping", "tcp", "http",
"passive-sip"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| thaim/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ldb_monitor.py | Python | mit | 12,596 |
import netrc, os, unittest, sys, textwrap
from test import test_support
temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
def tearDown(self):
os.unlink(temp_filename)
def make_nrc(self, test_data):
test_data = textwrap.dedent(test_data)
mode = 'w'
if sys.platform != 'cygwin':
mode += 't'
with open(temp_filename, mode) as fp:
fp.write(test_data)
return netrc.netrc(temp_filename)
def test_default(self):
nrc = self.make_nrc("""\
machine host1.domain.com login log1 password pass1 account acct1
default login log2 password pass2
""")
self.assertEqual(nrc.hosts['host1.domain.com'],
('log1', 'acct1', 'pass1'))
self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))
def test_macros(self):
nrc = self.make_nrc("""\
macdef macro1
line1
line2
macdef macro2
line3
line4
""")
self.assertEqual(nrc.macros, {'macro1': ['line1\n', 'line2\n'],
'macro2': ['line3\n', 'line4\n']})
def _test_passwords(self, nrc, passwd):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['host.domain.com'], ('log', 'acct', passwd))
def test_password_with_leading_hash(self):
self._test_passwords("""\
machine host.domain.com login log password #pass account acct
""", '#pass')
def test_password_with_trailing_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pass# account acct
""", 'pass#')
def test_password_with_internal_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pa#ss account acct
""", 'pa#ss')
def _test_comment(self, nrc, passwd='pass'):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['foo.domain.com'], ('bar', None, passwd))
self.assertEqual(nrc.hosts['bar.domain.com'], ('foo', None, 'pass'))
def test_comment_before_machine_line(self):
self._test_comment("""\
# comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_comment_before_machine_line_no_space(self):
self._test_comment("""\
#comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_comment_before_machine_line_hash_only(self):
self._test_comment("""\
#
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_comment_at_end_of_machine_line(self):
self._test_comment("""\
machine foo.domain.com login bar password pass # comment
machine bar.domain.com login foo password pass
""")
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_comment_at_end_of_machine_line_no_space(self):
self._test_comment("""\
machine foo.domain.com login bar password pass #comment
machine bar.domain.com login foo password pass
""")
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_comment_at_end_of_machine_line_pass_has_hash(self):
self._test_comment("""\
machine foo.domain.com login bar password #pass #comment
machine bar.domain.com login foo password pass
""", '#pass')
def test_main():
test_support.run_unittest(NetrcTestCase)
if __name__ == "__main__":
test_main()
| adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_netrc.py | Python | epl-1.0 | 4,100 |
def f(spam, eggs):
"""
:type spam: list of string
:type eggs: (bool, int, unicode)
"""
return spam, eggs
def test():
f(<warning descr="Expected type 'list[Union[str, unicode]]', got 'list[int]' instead">[1, 2, 3]</warning>,
<warning descr="Expected type 'Tuple[bool, int, unicode]', got 'Tuple[bool, int, str]' instead">(False, 2, '')</warning>)
| amith01994/intellij-community | python/testData/inspections/PyTypeCheckerInspection/ListTuple.py | Python | apache-2.0 | 378 |
class My<caret>Class:
pass | jwren/intellij-community | python/testData/qualifiedName/topLevelClassReference/pkg/subpkg/mod.py | Python | apache-2.0 | 30 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
return decorator_utils.add_notice_to_docstring(
doc, instructions,
'DEPRECATED FUNCTION',
'(deprecated)', [
'THIS FUNCTION IS DEPRECATED. It will be removed %s.' % (
'in a future version' if date is None else ('after %s' % date)),
'Instructions for updating:'])
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
return decorator_utils.add_notice_to_docstring(
doc, instructions,
'DEPRECATED FUNCTION ARGUMENTS',
'(deprecated arguments)', [
'SOME ARGUMENTS ARE DEPRECATED. '
'They will be removed %s.' % (
'in a future version' if date is None else ('after %s' % date)),
'Instructions for updating:'])
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _call_location():
"""Returns call location given level up from current call."""
frame = tf_inspect.currentframe()
if frame:
# CPython internals are available, use them for performance.
# walk back two frames to get to deprecated function caller.
first_frame = frame.f_back
second_frame = first_frame.f_back
frame = second_frame if second_frame else first_frame
return '%s:%d' % (frame.f_code.co_filename, frame.f_lineno)
else:
# Slow fallback path
stack = tf_inspect.stack(0) # 0 avoids generating unused context
entry = stack[2]
return '%s:%d' % (entry[1], entry[2])
def deprecated(date, instructions):
"""Decorator for marking functions or methods deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is deprecated and will be removed after <date>.
Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated)' is appended
to the first line of the docstring and a deprecation notice is prepended
to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None or in ISO 8601 format, or instructions are
empty.
"""
_validate_deprecation_args(date, instructions)
def deprecated_wrapper(func):
"""Deprecation wrapper."""
decorator_utils.validate_callable(func, 'deprecated')
@functools.wraps(func)
def new_func(*args, **kwargs):
logging.warning(
'From %s: %s (from %s) is deprecated and will be removed %s.\n'
'Instructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
return tf_decorator.make_decorator(
func, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(func.__doc__, date,
instructions))
return deprecated_wrapper
DeprecatedArgSpec = collections.namedtuple(
'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value'])
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples):
"""Decorator for marking specific function arguments as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument. It has the following format:
Calling <function> (from <module>) with <arg> is deprecated and will be
removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> includes the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
*deprecated_arg_names_or_tuples: String. or 2-Tuple(String,
[ok_vals]). The string is the deprecated argument name.
Optionally, an ok-value may be provided. If the user provided
argument equals this value, the warning is suppressed.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None or in ISO 8601 format, instructions are
empty, the deprecated arguments are not present in the function
signature, or the second element of a deprecated_tuple is not a
list.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_arg_names_or_tuples:
raise ValueError('Specify which argument is deprecated.')
def _get_arg_names_to_ok_vals():
"""Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
d = {}
for name_or_tuple in deprecated_arg_names_or_tuples:
if isinstance(name_or_tuple, tuple):
d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
else:
d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
return d
def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
"""Builds a dictionary from deprecated arguments to their spec.
Returned dict is keyed by argument name.
Each value is a DeprecatedArgSpec with the following fields:
position: The zero-based argument position of the argument
within the signature. None if the argument isn't found in
the signature.
ok_values: Values of this argument for which warning will be
suppressed.
Args:
names_to_ok_vals: dict from string arg_name to a list of values,
possibly empty, which should not elicit a warning.
arg_spec: Output from tf_inspect.getargspec on the called function.
Returns:
Dictionary from arg_name to DeprecatedArgSpec.
"""
arg_name_to_pos = dict(
(name, pos) for (pos, name) in enumerate(arg_spec.args))
deprecated_positional_args = {}
for arg_name, spec in iter(names_to_ok_vals.items()):
if arg_name in arg_name_to_pos:
pos = arg_name_to_pos[arg_name]
deprecated_positional_args[arg_name] = DeprecatedArgSpec(
pos, spec.has_ok_value, spec.ok_value)
return deprecated_positional_args
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_args')
deprecated_arg_names = _get_arg_names_to_ok_vals()
arg_spec = tf_inspect.getargspec(func)
deprecated_positions = _get_deprecated_positional_arguments(
deprecated_arg_names, arg_spec)
is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
is_kwargs_deprecated = arg_spec.keywords in deprecated_arg_names
if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
!= len(deprecated_arg_names_or_tuples)):
known_args = arg_spec.args + [arg_spec.varargs, arg_spec.keywords]
missing_args = [arg_name for arg_name in deprecated_arg_names
if arg_name not in known_args]
raise ValueError('The following deprecated arguments are not present '
'in the function signature: %s. '
'Found next arguments: %s.' % (missing_args, known_args))
def _same_value(a, b):
"""A comparison operation that works for multiple object types.
Returns True for two empty lists, two numeric values with the
same value, etc.
Returns False for (pd.DataFrame, None), and other pairs which
should not be considered equivalent.
Args:
a: value one of the comparison.
b: value two of the comparison.
Returns:
A boolean indicating whether the two inputs are the same value
for the purposes of deprecation.
"""
if a is b:
return True
try:
equality = a == b
if isinstance(equality, bool):
return equality
except TypeError:
return False
return False
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
invalid_args = []
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, spec in iter(deprecated_positions.items()):
if (spec.position < len(args) and
not (spec.has_ok_value and
_same_value(named_args[arg_name], spec.ok_value))):
invalid_args.append(arg_name)
if is_varargs_deprecated and len(args) > len(arg_spec.args):
invalid_args.append(arg_spec.varargs)
if is_kwargs_deprecated and kwargs:
invalid_args.append(arg_spec.keywords)
for arg_name in deprecated_arg_names:
if (arg_name in kwargs and
not (deprecated_positions[arg_name].has_ok_value and
_same_value(named_args[arg_name],
deprecated_positions[arg_name].ok_value))):
invalid_args.append(arg_name)
for arg_name in invalid_args:
logging.warning(
'From %s: calling %s (from %s) with %s is deprecated and will '
'be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
return tf_decorator.make_decorator(func, new_func, 'deprecated',
_add_deprecated_arg_notice_to_docstring(
func.__doc__, date, instructions))
return deprecated_wrapper
def deprecated_arg_values(date, instructions, **deprecated_kwargs):
"""Decorator for marking specific function argument values as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument values. It has the following format:
Calling <function> (from <module>) with <arg>=<value> is deprecated and
will be removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None
instructions: String. Instructions on how to update code using the
deprecated function.
**deprecated_kwargs: The deprecated argument values.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None or in ISO 8601 format, or instructions are
empty.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_kwargs:
raise ValueError('Specify which argument values are deprecated.')
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_arg_values')
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, arg_value in deprecated_kwargs.items():
if arg_name in named_args and named_args[arg_name] == arg_value:
logging.warning(
'From %s: calling %s (from %s) with %s=%s is deprecated and will '
'be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name, arg_value,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
return tf_decorator.make_decorator(func, new_func, 'deprecated',
_add_deprecated_arg_notice_to_docstring(
func.__doc__, date, instructions))
return deprecated_wrapper
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
"""Looks up deprecated argument name and ensures both are not used.
Args:
new_name: new name of argument
new_value: value of new argument (or None if not used)
old_name: old name of argument
old_value: value of old argument (or None if not used)
Returns:
The effective argument that should be used.
Raises:
ValueError: if new_value and old_value are both non-null
"""
if old_value is not None:
if new_value is not None:
raise ValueError("Cannot specify both '%s' and '%s'" %
(old_name, new_name))
return old_value
return new_value
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
return old_doc.replace('`%s`' % old_argument, '`%s`' % new_argument).replace(
'%s:' % old_argument, '%s:' % new_argument)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/python/util/deprecation.py | Python | bsd-2-clause | 15,050 |
#openerp.loggers.handlers. -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ConfigParser
import optparse
import os
import sys
import openerp
import openerp.conf
import openerp.loglevels as loglevels
import logging
import openerp.release as release
import appdirs
class MyOption (optparse.Option, object):
""" optparse Option with two additional attributes.
The list of command line options (getopt.Option) is used to create the
list of the configuration file options. When reading the file, and then
reading the command line arguments, we don't want optparse.parse results
to override the configuration file values. But if we provide default
values to optparse, optparse will return them and we can't know if they
were really provided by the user or not. A solution is to not use
optparse's default attribute, but use a custom one (that will be copied
to create the default values of the configuration file).
"""
def __init__(self, *opts, **attrs):
self.my_default = attrs.pop('my_default', None)
super(MyOption, self).__init__(*opts, **attrs)
DEFAULT_LOG_HANDLER = ':INFO'
def _get_default_datadir():
home = os.path.expanduser('~')
if os.path.isdir(home):
func = appdirs.user_data_dir
else:
if sys.platform in ['win32', 'darwin']:
func = appdirs.site_data_dir
else:
func = lambda **kwarg: "/var/lib/%s" % kwarg['appname'].lower()
# No "version" kwarg as session and filestore paths are shared against series
return func(appname=release.product_name, appauthor=release.author)
def _deduplicate_loggers(loggers):
""" Avoid saving multiple logging levels for the same loggers to a save
file, that just takes space and the list can potentially grow unbounded
if for some odd reason people use :option`odoo.py --save`` all the time.
"""
# dict(iterable) -> the last item of iterable for any given key wins,
# which is what we want and expect. Output order should not matter as
# there are no duplicates within the output sequence
return (
'{}:{}'.format(logger, level)
for logger, level in dict(it.split(':') for it in loggers).iteritems()
)
class configmanager(object):
def __init__(self, fname=None):
"""Constructor.
:param fname: a shortcut allowing to instantiate :class:`configmanager`
from Python code without resorting to environment
variable
"""
# Options not exposed on the command line. Command line options will be added
# from optparse's parser.
self.options = {
'admin_passwd': 'admin',
'csv_internal_sep': ',',
'publisher_warranty_url': 'http://services.openerp.com/publisher-warranty/',
'reportgz': False,
'root_path': None,
}
# Not exposed in the configuration file.
self.blacklist_for_save = set([
'publisher_warranty_url', 'load_language', 'root_path',
'init', 'save', 'config', 'update', 'stop_after_init'
])
# dictionary mapping option destination (keys in self.options) to MyOptions.
self.casts = {}
self.misc = {}
self.config_file = fname
self._LOGLEVELS = dict([
(getattr(loglevels, 'LOG_%s' % x), getattr(logging, x))
for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET')
])
version = "%s %s" % (release.description, release.version)
self.parser = parser = optparse.OptionParser(version=version, option_class=MyOption)
# Server startup config
group = optparse.OptionGroup(parser, "Common options")
group.add_option("-c", "--config", dest="config", help="specify alternate config file")
group.add_option("-s", "--save", action="store_true", dest="save", default=False,
help="save configuration to ~/.openerp_serverrc")
group.add_option("-i", "--init", dest="init", help="install one or more modules (comma-separated list, use \"all\" for all modules), requires -d")
group.add_option("-u", "--update", dest="update",
help="update one or more modules (comma-separated list, use \"all\" for all modules). Requires -d.")
group.add_option("--without-demo", dest="without_demo",
help="disable loading demo data for modules to be installed (comma-separated, use \"all\" for all modules). Requires -d and -i. Default is %default",
my_default=False)
group.add_option("-P", "--import-partial", dest="import_partial", my_default='',
help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.")
group.add_option("--pidfile", dest="pidfile", help="file where the server pid will be stored")
group.add_option("--addons-path", dest="addons_path",
help="specify additional addons paths (separated by commas).",
action="callback", callback=self._check_addons_path, nargs=1, type="string")
group.add_option("--load", dest="server_wide_modules", help="Comma-separated list of server-wide modules default=web")
group.add_option("-D", "--data-dir", dest="data_dir", my_default=_get_default_datadir(),
help="Directory where to store Odoo data")
parser.add_option_group(group)
# XML-RPC / HTTP
group = optparse.OptionGroup(parser, "XML-RPC Configuration")
group.add_option("--xmlrpc-interface", dest="xmlrpc_interface", my_default='',
help="Specify the TCP IP address for the XML-RPC protocol. The empty string binds to all interfaces.")
group.add_option("--xmlrpc-port", dest="xmlrpc_port", my_default=8069,
help="specify the TCP port for the XML-RPC protocol", type="int")
group.add_option("--no-xmlrpc", dest="xmlrpc", action="store_false", my_default=True,
help="disable the XML-RPC protocol")
group.add_option("--proxy-mode", dest="proxy_mode", action="store_true", my_default=False,
help="Enable correct behavior when behind a reverse proxy")
group.add_option("--longpolling-port", dest="longpolling_port", my_default=8072,
help="specify the TCP port for longpolling requests", type="int")
parser.add_option_group(group)
# WEB
group = optparse.OptionGroup(parser, "Web interface Configuration")
group.add_option("--db-filter", dest="dbfilter", my_default='.*',
help="Filter listed database", metavar="REGEXP")
parser.add_option_group(group)
# Testing Group
group = optparse.OptionGroup(parser, "Testing Configuration")
group.add_option("--test-file", dest="test_file", my_default=False,
help="Launch a python or YML test file.")
group.add_option("--test-report-directory", dest="test_report_directory", my_default=False,
help="If set, will save sample of all reports in this directory.")
group.add_option("--test-enable", action="store_true", dest="test_enable",
my_default=False, help="Enable YAML and unit tests.")
group.add_option("--test-commit", action="store_true", dest="test_commit",
my_default=False, help="Commit database changes performed by YAML or XML tests.")
parser.add_option_group(group)
# Logging Group
group = optparse.OptionGroup(parser, "Logging Configuration")
group.add_option("--logfile", dest="logfile", help="file where the server log will be stored")
group.add_option("--logrotate", dest="logrotate", action="store_true", my_default=False, help="enable logfile rotation")
group.add_option("--syslog", action="store_true", dest="syslog", my_default=False, help="Send the log to the syslog server")
group.add_option('--log-handler', action="append", default=[], my_default=DEFAULT_LOG_HANDLER, metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. Example: "openerp.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")')
group.add_option('--log-request', action="append_const", dest="log_handler", const="openerp.http.rpc.request:DEBUG", help='shortcut for --log-handler=openerp.http.rpc.request:DEBUG')
group.add_option('--log-response', action="append_const", dest="log_handler", const="openerp.http.rpc.response:DEBUG", help='shortcut for --log-handler=openerp.http.rpc.response:DEBUG')
group.add_option('--log-web', action="append_const", dest="log_handler", const="openerp.http:DEBUG", help='shortcut for --log-handler=openerp.http:DEBUG')
group.add_option('--log-sql', action="append_const", dest="log_handler", const="openerp.sql_db:DEBUG", help='shortcut for --log-handler=openerp.sql_db:DEBUG')
group.add_option('--log-db', dest='log_db', help="Logging database", my_default=False)
group.add_option('--log-db-level', dest='log_db_level', my_default='warning', help="Logging database level")
# For backward-compatibility, map the old log levels to something
# quite close.
levels = [
'info', 'debug_rpc', 'warn', 'test', 'critical',
'debug_sql', 'error', 'debug', 'debug_rpc_answer', 'notset'
]
group.add_option('--log-level', dest='log_level', type='choice',
choices=levels, my_default='info',
help='specify the level of the logging. Accepted values: %s.' % (levels,))
parser.add_option_group(group)
# SMTP Group
group = optparse.OptionGroup(parser, "SMTP Configuration")
group.add_option('--email-from', dest='email_from', my_default=False,
help='specify the SMTP email address for sending email')
group.add_option('--smtp', dest='smtp_server', my_default='localhost',
help='specify the SMTP server for sending email')
group.add_option('--smtp-port', dest='smtp_port', my_default=25,
help='specify the SMTP port', type="int")
group.add_option('--smtp-ssl', dest='smtp_ssl', action='store_true', my_default=False,
help='if passed, SMTP connections will be encrypted with SSL (STARTTLS)')
group.add_option('--smtp-user', dest='smtp_user', my_default=False,
help='specify the SMTP username for sending email')
group.add_option('--smtp-password', dest='smtp_password', my_default=False,
help='specify the SMTP password for sending email')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Database related options")
group.add_option("-d", "--database", dest="db_name", my_default=False,
help="specify the database name")
group.add_option("-r", "--db_user", dest="db_user", my_default=False,
help="specify the database user name")
group.add_option("-w", "--db_password", dest="db_password", my_default=False,
help="specify the database password")
group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
group.add_option("--db_host", dest="db_host", my_default=False,
help="specify the database host")
group.add_option("--db_port", dest="db_port", my_default=False,
help="specify the database port", type="int")
group.add_option("--db_maxconn", dest="db_maxconn", type='int', my_default=64,
help="specify the the maximum number of physical connections to posgresql")
group.add_option("--db-template", dest="db_template", my_default="template1",
help="specify a custom database template to create a new database")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Internationalisation options",
"Use these options to translate Odoo to another language."
"See i18n section of the user manual. Option '-d' is mandatory."
"Option '-l' is mandatory in case of importation"
)
group.add_option('--load-language', dest="load_language",
help="specifies the languages for the translations you want to be loaded")
group.add_option('-l', "--language", dest="language",
help="specify the language of the translation file. Use it with --i18n-export or --i18n-import")
group.add_option("--i18n-export", dest="translate_out",
help="export all sentences to be translated to a CSV file, a PO file or a TGZ archive and exit")
group.add_option("--i18n-import", dest="translate_in",
help="import a CSV or a PO file with translations and exit. The '-l' option is required.")
group.add_option("--i18n-overwrite", dest="overwrite_existing_translations", action="store_true", my_default=False,
help="overwrites existing translation terms on updating a module or importing a CSV or a PO file.")
group.add_option("--modules", dest="translate_modules",
help="specify modules to export. Use in combination with --i18n-export")
parser.add_option_group(group)
security = optparse.OptionGroup(parser, 'Security-related options')
security.add_option('--no-database-list', action="store_false", dest='list_db', my_default=True,
help="disable the ability to return the list of databases")
parser.add_option_group(security)
# Advanced options
group = optparse.OptionGroup(parser, "Advanced options")
group.add_option('--dev', dest='dev_mode', action='store_true', my_default=False, help='enable developper mode')
group.add_option('--debug', dest='debug_mode', action='store_true', my_default=False, help='enable debug mode')
group.add_option("--stop-after-init", action="store_true", dest="stop_after_init", my_default=False,
help="stop the server after its initialization")
group.add_option("--osv-memory-count-limit", dest="osv_memory_count_limit", my_default=False,
help="Force a limit on the maximum number of records kept in the virtual "
"osv_memory tables. The default is False, which means no count-based limit.",
type="int")
group.add_option("--osv-memory-age-limit", dest="osv_memory_age_limit", my_default=1.0,
help="Force a limit on the maximum age of records kept in the virtual "
"osv_memory tables. This is a decimal value expressed in hours, "
"and the default is 1 hour.",
type="float")
group.add_option("--max-cron-threads", dest="max_cron_threads", my_default=2,
help="Maximum number of threads processing concurrently cron jobs (default 2).",
type="int")
group.add_option("--unaccent", dest="unaccent", my_default=False, action="store_true",
help="Use the unaccent function provided by the database when available.")
group.add_option("--geoip-db", dest="geoip_database", my_default='/usr/share/GeoIP/GeoLiteCity.dat',
help="Absolute path to the GeoIP database file.")
parser.add_option_group(group)
if os.name == 'posix':
group = optparse.OptionGroup(parser, "Multiprocessing options")
# TODO sensible default for the three following limits.
group.add_option("--workers", dest="workers", my_default=0,
help="Specify the number of workers, 0 disable prefork mode.",
type="int")
group.add_option("--limit-memory-soft", dest="limit_memory_soft", my_default=2048 * 1024 * 1024,
help="Maximum allowed virtual memory per worker, when reached the worker be reset after the current request (default 671088640 aka 640MB).",
type="int")
group.add_option("--limit-memory-hard", dest="limit_memory_hard", my_default=2560 * 1024 * 1024,
help="Maximum allowed virtual memory per worker, when reached, any memory allocation will fail (default 805306368 aka 768MB).",
type="int")
group.add_option("--limit-time-cpu", dest="limit_time_cpu", my_default=60,
help="Maximum allowed CPU time per request (default 60).",
type="int")
group.add_option("--limit-time-real", dest="limit_time_real", my_default=120,
help="Maximum allowed Real time per request (default 120).",
type="int")
group.add_option("--limit-request", dest="limit_request", my_default=8192,
help="Maximum number of request to be processed per worker (default 8192).",
type="int")
parser.add_option_group(group)
# Copy all optparse options (i.e. MyOption) into self.options.
for group in parser.option_groups:
for option in group.option_list:
if option.dest not in self.options:
self.options[option.dest] = option.my_default
self.casts[option.dest] = option
# generate default config
self._parse_config()
def parse_config(self, args=None):
""" Parse the configuration file (if any) and the command-line
arguments.
This method initializes openerp.tools.config and openerp.conf (the
former should be removed in the furture) with library-wide
configuration values.
This method must be called before proper usage of this library can be
made.
Typical usage of this method:
openerp.tools.config.parse_config(sys.argv[1:])
"""
self._parse_config(args)
openerp.netsvc.init_logger()
openerp.modules.module.initialize_sys_path()
def _parse_config(self, args=None):
if args is None:
args = []
opt, args = self.parser.parse_args(args)
def die(cond, msg):
if cond:
self.parser.error(msg)
# Ensures no illegitimate argument is silently discarded (avoids insidious "hyphen to dash" problem)
die(args, "unrecognized parameters: '%s'" % " ".join(args))
die(bool(opt.syslog) and bool(opt.logfile),
"the syslog and logfile options are exclusive")
die(opt.translate_in and (not opt.language or not opt.db_name),
"the i18n-import option cannot be used without the language (-l) and the database (-d) options")
die(opt.overwrite_existing_translations and not (opt.translate_in or opt.update),
"the i18n-overwrite option cannot be used without the i18n-import option or without the update option")
die(opt.translate_out and (not opt.db_name),
"the i18n-export option cannot be used without the database (-d) option")
# Check if the config file exists (-c used, but not -s)
die(not opt.save and opt.config and not os.access(opt.config, os.R_OK),
"The config file '%s' selected with -c/--config doesn't exist or is not readable, "\
"use -s/--save if you want to generate it"% opt.config)
# place/search the config file on Win32 near the server installation
# (../etc from the server)
# if the server is run by an unprivileged user, he has to specify location of a config file where he has the rights to write,
# else he won't be able to save the configurations, or even to start the server...
# TODO use appdirs
if os.name == 'nt':
rcfilepath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'openerp-server.conf')
else:
rcfilepath = os.path.expanduser('~/.openerp_serverrc')
self.rcfile = os.path.abspath(
self.config_file or opt.config or os.environ.get('OPENERP_SERVER') or rcfilepath)
self.load()
# Verify that we want to log or not, if not the output will go to stdout
if self.options['logfile'] in ('None', 'False'):
self.options['logfile'] = False
# the same for the pidfile
if self.options['pidfile'] in ('None', 'False'):
self.options['pidfile'] = False
# if defined dont take the configfile value even if the defined value is None
keys = ['xmlrpc_interface', 'xmlrpc_port', 'longpolling_port',
'db_name', 'db_user', 'db_password', 'db_host',
'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
'email_from', 'smtp_server', 'smtp_user', 'smtp_password',
'db_maxconn', 'import_partial', 'addons_path',
'xmlrpc', 'syslog', 'without_demo',
'dbfilter', 'log_level', 'log_db',
'log_db_level', 'geoip_database',
]
for arg in keys:
# Copy the command-line argument (except the special case for log_handler, due to
# action=append requiring a real default, so we cannot use the my_default workaround)
if getattr(opt, arg):
self.options[arg] = getattr(opt, arg)
# ... or keep, but cast, the config file value.
elif isinstance(self.options[arg], basestring) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])
if isinstance(self.options['log_handler'], basestring):
self.options['log_handler'] = self.options['log_handler'].split(',')
self.options['log_handler'].extend(opt.log_handler)
# if defined but None take the configfile value
keys = [
'language', 'translate_out', 'translate_in', 'overwrite_existing_translations',
'debug_mode', 'dev_mode', 'smtp_ssl', 'load_language',
'stop_after_init', 'logrotate', 'without_demo', 'xmlrpc', 'syslog',
'list_db', 'proxy_mode',
'test_file', 'test_enable', 'test_commit', 'test_report_directory',
'osv_memory_count_limit', 'osv_memory_age_limit', 'max_cron_threads', 'unaccent',
'data_dir',
]
posix_keys = [
'workers',
'limit_memory_hard', 'limit_memory_soft',
'limit_time_cpu', 'limit_time_real', 'limit_request',
]
if os.name == 'posix':
keys += posix_keys
else:
self.options.update(dict.fromkeys(posix_keys, None))
# Copy the command-line arguments...
for arg in keys:
if getattr(opt, arg) is not None:
self.options[arg] = getattr(opt, arg)
# ... or keep, but cast, the config file value.
elif isinstance(self.options[arg], basestring) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])
self.options['root_path'] = os.path.abspath(os.path.expanduser(os.path.expandvars(os.path.dirname(openerp.__file__))))
if not self.options['addons_path'] or self.options['addons_path']=='None':
default_addons = []
base_addons = os.path.join(self.options['root_path'], 'addons')
if os.path.exists(base_addons):
default_addons.append(base_addons)
main_addons = os.path.abspath(os.path.join(self.options['root_path'], '../addons'))
if os.path.exists(main_addons):
default_addons.append(main_addons)
self.options['addons_path'] = ','.join(default_addons)
else:
self.options['addons_path'] = ",".join(
os.path.abspath(os.path.expanduser(os.path.expandvars(x.strip())))
for x in self.options['addons_path'].split(','))
self.options['init'] = opt.init and dict.fromkeys(opt.init.split(','), 1) or {}
self.options["demo"] = not opt.without_demo and self.options['init'] or {}
self.options['update'] = opt.update and dict.fromkeys(opt.update.split(','), 1) or {}
self.options['translate_modules'] = opt.translate_modules and map(lambda m: m.strip(), opt.translate_modules.split(',')) or ['all']
self.options['translate_modules'].sort()
if opt.pg_path:
self.options['pg_path'] = opt.pg_path
if self.options.get('language', False):
if len(self.options['language']) > 5:
raise Exception('ERROR: The Lang name must take max 5 chars, Eg: -lfr_BE')
if opt.save:
self.save()
openerp.conf.addons_paths = self.options['addons_path'].split(',')
if opt.server_wide_modules:
openerp.conf.server_wide_modules = map(lambda m: m.strip(), opt.server_wide_modules.split(','))
else:
openerp.conf.server_wide_modules = ['web','web_kanban']
def _is_addons_path(self, path):
for f in os.listdir(path):
modpath = os.path.join(path, f)
if os.path.isdir(modpath):
def hasfile(filename):
return os.path.isfile(os.path.join(modpath, filename))
if hasfile('__init__.py') and (hasfile('__openerp__.py') or hasfile('__terp__.py')):
return True
return False
def _check_addons_path(self, option, opt, value, parser):
ad_paths = []
for path in value.split(','):
path = path.strip()
res = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(res):
raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, path))
if not self._is_addons_path(res):
raise optparse.OptionValueError("option %s: The addons-path %r does not seem to a be a valid Addons Directory!" % (opt, path))
ad_paths.append(res)
setattr(parser.values, option.dest, ",".join(ad_paths))
def load(self):
p = ConfigParser.ConfigParser()
try:
p.read([self.rcfile])
for (name,value) in p.items('options'):
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.options[name] = value
#parse the other sections, as well
for sec in p.sections():
if sec == 'options':
continue
if not self.misc.has_key(sec):
self.misc[sec]= {}
for (name, value) in p.items(sec):
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.misc[sec][name] = value
except IOError:
pass
except ConfigParser.NoSectionError:
pass
def save(self):
p = ConfigParser.ConfigParser()
loglevelnames = dict(zip(self._LOGLEVELS.values(), self._LOGLEVELS.keys()))
p.add_section('options')
for opt in sorted(self.options.keys()):
if opt in ('version', 'language', 'translate_out', 'translate_in', 'overwrite_existing_translations', 'init', 'update'):
continue
if opt in self.blacklist_for_save:
continue
if opt in ('log_level',):
p.set('options', opt, loglevelnames.get(self.options[opt], self.options[opt]))
elif opt == 'log_handler':
p.set('options', opt, ','.join(_deduplicate_loggers(self.options[opt])))
else:
p.set('options', opt, self.options[opt])
for sec in sorted(self.misc.keys()):
p.add_section(sec)
for opt in sorted(self.misc[sec].keys()):
p.set(sec,opt,self.misc[sec][opt])
# try to create the directories and write the file
try:
rc_exists = os.path.exists(self.rcfile)
if not rc_exists and not os.path.exists(os.path.dirname(self.rcfile)):
os.makedirs(os.path.dirname(self.rcfile))
try:
p.write(file(self.rcfile, 'w'))
if not rc_exists:
os.chmod(self.rcfile, 0600)
except IOError:
sys.stderr.write("ERROR: couldn't write the config file\n")
except OSError:
# what to do if impossible?
sys.stderr.write("ERROR: couldn't create the config directory\n")
def get(self, key, default=None):
return self.options.get(key, default)
def pop(self, key, default=None):
return self.options.pop(key, default)
def get_misc(self, sect, key, default=None):
return self.misc.get(sect,{}).get(key, default)
def __setitem__(self, key, value):
self.options[key] = value
if key in self.options and isinstance(self.options[key], basestring) and \
key in self.casts and self.casts[key].type in optparse.Option.TYPE_CHECKER:
self.options[key] = optparse.Option.TYPE_CHECKER[self.casts[key].type](self.casts[key], key, self.options[key])
def __getitem__(self, key):
return self.options[key]
@property
def addons_data_dir(self):
d = os.path.join(self['data_dir'], 'addons', release.series)
if not os.path.exists(d):
os.makedirs(d, 0700)
else:
assert os.access(d, os.W_OK), \
"%s: directory is not writable" % d
return d
@property
def session_dir(self):
d = os.path.join(self['data_dir'], 'sessions')
if not os.path.exists(d):
os.makedirs(d, 0700)
else:
assert os.access(d, os.W_OK), \
"%s: directory is not writable" % d
return d
def filestore(self, dbname):
return os.path.join(self['data_dir'], 'filestore', dbname)
config = configmanager()
| angelapper/odoo | openerp/tools/config.py | Python | agpl-3.0 | 31,112 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import stock_move
| raycarnes/odoomrp-utils | stock_move_partner_info/models/__init__.py | Python | agpl-3.0 | 286 |
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.sqs
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.constants import TROUBLE_REGIONS
from security_monkey.exceptions import InvalidAWSJSON
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app
import json
import boto
from boto.sqs import regions
class SQS(Watcher):
index = 'sqs'
i_am_singular = 'SQS Policy'
i_am_plural = 'SQS Policies'
def __init__(self, accounts=None, debug=False):
super(SQS, self).__init__(accounts=accounts, debug=debug)
def slurp(self):
"""
:returns: item_list - list of SQS Policies.
:returns: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
item_list = []
exception_map = {}
from security_monkey.common.sts_connect import connect
for account in self.accounts:
for region in regions():
app.logger.debug("Checking {}/{}/{}".format(SQS.index, account, region.name))
try:
sqs = connect(account, 'sqs', region=region)
all_queues = self.wrap_aws_rate_limited_call(
sqs.get_all_queues
)
except Exception as e:
if region.name not in TROUBLE_REGIONS:
exc = BotoConnectionIssue(str(e), 'sqs', account, region.name)
self.slurp_exception((self.index, account, region.name), exc, exception_map)
continue
app.logger.debug("Found {} {}".format(len(all_queues), SQS.i_am_plural))
for q in all_queues:
if self.check_ignore_list(q.name):
continue
try:
policy = self.wrap_aws_rate_limited_call(
q.get_attributes,
attributes='Policy'
)
if 'Policy' in policy:
try:
json_str = policy['Policy']
policy = json.loads(json_str)
item = SQSItem(region=region.name, account=account, name=q.name,
config=policy)
item_list.append(item)
except:
self.slurp_exception((self.index, account, region, q.name), InvalidAWSJSON(json_str), exception_map)
except boto.exception.SQSError:
# A number of Queues are so ephemeral that they may be gone by the time
# the code reaches here. Just ignore them and move on.
pass
return item_list, exception_map
class SQSItem(ChangeItem):
def __init__(self, region=None, account=None, name=None, config={}):
super(SQSItem, self).__init__(
index=SQS.index,
region=region,
account=account,
name=name,
new_config=config)
| pradeep-aradhya/security_monkey | security_monkey/watchers/sqs.py | Python | apache-2.0 | 4,045 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Scheduler tests.
"""
import six
from nova import objects
from nova.scheduler import host_manager
NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
COMPUTE_NODES = [
objects.ComputeNode(
id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=None, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0, updated_at=None,
host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
objects.ComputeNode(
id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0, updated_at=None,
host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
objects.ComputeNode(
id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0, updated_at=None,
host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(),
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
objects.ComputeNode(
id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8888, local_gb_used=0, updated_at=None,
host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
# Broken entry
objects.ComputeNode(
id=5, local_gb=1024, memory_mb=1024, vcpus=1,
host='fake', hypervisor_hostname='fake-hyp'),
]
SERVICES = [
objects.Service(host='host1', disabled=False),
objects.Service(host='host2', disabled=True),
objects.Service(host='host3', disabled=False),
objects.Service(host='host4', disabled=False),
]
def get_service_by_host(host):
services = [service for service in SERVICES if service.host == host]
return services[0]
class FakeHostState(host_manager.HostState):
def __init__(self, host, node, attribute_dict, instances=None):
super(FakeHostState, self).__init__(host, node)
if instances:
self.instances = {inst.uuid: inst for inst in instances}
else:
self.instances = {}
for (key, val) in six.iteritems(attribute_dict):
setattr(self, key, val)
| scripnichenko/nova | nova/tests/unit/scheduler/fakes.py | Python | apache-2.0 | 4,507 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from horizon.test.settings import * # noqa
from horizon.utils import secret_key
from openstack_dashboard import exceptions
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(TEST_DIR, ".."))
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
SECRET_KEY = secret_key.generate_or_read_from_file(
os.path.join(TEST_DIR, '.secret_key_store'))
ROOT_URLCONF = 'openstack_dashboard.urls'
TEMPLATE_DIRS = (
os.path.join(TEST_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
'openstack_dashboard.context_processors.openstack',
)
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.humanize',
'django_nose',
'openstack_auth',
'compressor',
'horizon',
'openstack_dashboard',
'openstack_dashboard.dashboards.project',
'openstack_dashboard.dashboards.admin',
'openstack_dashboard.dashboards.identity',
'openstack_dashboard.dashboards.settings',
'openstack_dashboard.dashboards.router',
)
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
SITE_BRANDING = 'OpenStack'
HORIZON_CONFIG = {
'dashboards': ('project', 'admin', 'identity', 'settings', 'router',),
'default_dashboard': 'project',
"password_validator": {
"regex": '^.{8,18}$',
"help_text": "Password must be between 8 and 18 characters."
},
'user_home': None,
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
'angular_modules': [],
'js_files': [],
}
# Set to True to allow users to upload images to glance via Horizon server.
# When enabled, a file form field will appear on the create image form.
# See documentation for deployment considerations.
HORIZON_IMAGES_ALLOW_UPLOAD = True
AVAILABLE_REGIONS = [
('http://localhost:5000/v2.0', 'local'),
('http://remote:5000/v2.0', 'remote'),
]
OPENSTACK_API_VERSIONS = {
"identity": 3
}
OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'test_domain'
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
OPENSTACK_CINDER_FEATURES = {
'enable_backup': True,
}
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': False, # Enabled in specific tests only
# Parameters below (enable_lb, enable_firewall, enable_vpn)
# control if these panels are displayed or not,
# i.e. they only affect the navigation menu.
# These panels are registered even if enable_XXX is False,
# so we don't need to set them to True in most unit tests
# to avoid stubbing neutron extension check calls.
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'profile_support': None,
'enable_distributed_router': False,
# 'profile_support': 'cisco'
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': True,
}
OPENSTACK_IMAGE_BACKEND = {
'image_formats': [
('', 'Select format'),
('aki', 'AKI - Amazon Kernel Image'),
('ami', 'AMI - Amazon Machine Image'),
('ari', 'ARI - Amazon Ramdisk Image'),
('iso', 'ISO - Optical Disk Image'),
('qcow2', 'QCOW2 - QEMU Emulator'),
('raw', 'Raw'),
('vdi', 'VDI'),
('vhd', 'VHD'),
('vmdk', 'VMDK')
]
}
LOGGING['loggers'].update(
{
'openstack_dashboard': {
'handlers': ['test'],
'propagate': False,
},
'openstack_auth': {
'handlers': ['test'],
'propagate': False,
},
'novaclient': {
'handlers': ['test'],
'propagate': False,
},
'keystoneclient': {
'handlers': ['test'],
'propagate': False,
},
'glanceclient': {
'handlers': ['test'],
'propagate': False,
},
'neutronclient': {
'handlers': ['test'],
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
}
)
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': 'ALL TCP',
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
}
NOSE_ARGS = ['--nocapture',
'--nologcapture',
'--cover-package=openstack_dashboard',
'--cover-inclusive',
'--all-modules']
POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
POLICY_FILES = {
'identity': 'keystone_policy.json',
'compute': 'nova_policy.json'
}
# The openstack_auth.user.Token object isn't JSON-serializable ATM
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
| zouyapeng/horizon-newtouch | openstack_dashboard/test/settings.py | Python | apache-2.0 | 5,898 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
retcode = subprocess.call(*args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# The release of Android we use is static, so there's no need to do anything
# if the directory already exists.
if os.path.isdir(ANDROID_DIR):
return
print '@@@BUILD_STEP Initialize Android checkout@@@'
os.mkdir(ANDROID_DIR)
CallSubProcess(['git', 'config', '--global', 'user.name', 'trybot'])
CallSubProcess(['git', 'config', '--global',
'user.email', '[email protected]'])
CallSubProcess(['git', 'config', '--global', 'color.ui', 'false'])
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'android-4.2.1_r1',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4'], cwd=ANDROID_DIR)
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', 'source build/envsetup.sh && lunch full-eng && make -j4'],
cwd=ANDROID_DIR)
def GypTestFormat(title, format=None, msvs_version=None):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
0 for sucesss, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
# TODO(bradnelson): remove this when this issue is resolved:
# http://code.google.com/p/chromium/issues/detail?id=108251
if format == 'ninja':
env['NOGOLD'] = '1'
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'trunk/gyptest.py',
'--all',
'--passed',
'--format', format,
'--chdir', 'trunk',
'--path', '../scons'])
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', 'source build/envsetup.sh && lunch full-eng && cd %s && %s'
% (ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
retcode += GypTestFormat('android')
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('scons')
retcode += GypTestFormat('make')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('msvs-2008', format='msvs', msvs_version='2008')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-2010', format='msvs', msvs_version='2010')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| AVarfolomeev/picasso-graphic | tools/gyp/buildbot/buildbot_run.py | Python | bsd-3-clause | 4,948 |
import Orange
data = Orange.data.Table("lenses")
myope_subset = [d for d in data if d["prescription"] == "myope"]
new_data = Orange.data.Table(data.domain, myope_subset)
new_data.save("lenses-subset.tab")
| marinkaz/orange3 | doc/data-mining-library/source/tutorial/code/data-save.py | Python | bsd-2-clause | 205 |
import vstruct
from vstruct.primitives import *
class IMAGE_BASE_RELOCATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_uint32()
self.SizeOfBlock = v_uint32()
class IMAGE_DATA_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_uint32()
self.Size = v_uint32()
class IMAGE_DOS_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.e_magic = v_uint16()
self.e_cblp = v_uint16()
self.e_cp = v_uint16()
self.e_crlc = v_uint16()
self.e_cparhdr = v_uint16()
self.e_minalloc = v_uint16()
self.e_maxalloc = v_uint16()
self.e_ss = v_uint16()
self.e_sp = v_uint16()
self.e_csum = v_uint16()
self.e_ip = v_uint16()
self.e_cs = v_uint16()
self.e_lfarlc = v_uint16()
self.e_ovno = v_uint16()
self.e_res = vstruct.VArray([v_uint16() for i in range(4)])
self.e_oemid = v_uint16()
self.e_oeminfo = v_uint16()
self.e_res2 = vstruct.VArray([v_uint16() for i in range(10)])
self.e_lfanew = v_uint32()
class IMAGE_EXPORT_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Characteristics = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.Name = v_uint32()
self.Base = v_uint32()
self.NumberOfFunctions = v_uint32()
self.NumberOfNames = v_uint32()
self.AddressOfFunctions = v_uint32()
self.AddressOfNames = v_uint32()
self.AddressOfOrdinals = v_uint32()
class IMAGE_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Machine = v_uint16()
self.NumberOfSections = v_uint16()
self.TimeDateStamp = v_uint32()
self.PointerToSymbolTable = v_uint32()
self.NumberOfSymbols = v_uint32()
self.SizeOfOptionalHeader = v_uint16()
self.Ccharacteristics = v_uint16()
class IMAGE_IMPORT_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OriginalFirstThunk = v_uint32()
self.TimeDateStamp = v_uint32()
self.ForwarderChain = v_uint32()
self.Name = v_uint32()
self.FirstThunk = v_uint32()
class IMAGE_IMPORT_BY_NAME(vstruct.VStruct):
def __init__(self, namelen=128):
vstruct.VStruct.__init__(self)
self.Hint = v_uint16()
self.Name = v_str(size=namelen)
class IMAGE_LOAD_CONFIG_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.GlobalFlagsClear = v_uint32()
self.GlobalFlagsSet = v_uint32()
self.CriticalSectionDefaultTimeout = v_uint32()
self.DeCommitFreeBlockThreshold = v_uint32()
self.DeCommitTotalFreeThreshold = v_uint32()
self.LockPrefixTable = v_uint32()
self.MaximumAllocationSize = v_uint32()
self.VirtualMemoryThreshold = v_uint32()
self.ProcessHeapFlags = v_uint32()
self.ProcessAffinityMask = v_uint32()
self.CSDVersion = v_uint16()
self.Reserved1 = v_uint16()
self.EditList = v_uint32()
self.SecurityCookie = v_uint32()
self.SEHandlerTable = v_uint32()
self.SEHandlerCount = v_uint32()
class IMAGE_NT_HEADERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_bytes(4)
self.FileHeader = IMAGE_FILE_HEADER()
self.OptionalHeader = IMAGE_OPTIONAL_HEADER()
class IMAGE_NT_HEADERS64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_bytes(4)
self.FileHeader = IMAGE_FILE_HEADER()
self.OptionalHeader = IMAGE_OPTIONAL_HEADER64()
class IMAGE_OPTIONAL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_bytes(2)
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.BaseOfData = v_uint32()
self.ImageBase = v_uint32()
self.SectionAlignment = v_uint32()
self.FileAlignment = v_uint32()
self.MajorOperatingSystemVersion = v_uint16()
self.MinorOperatingSystemVersion = v_uint16()
self.MajorImageVersion = v_uint16()
self.MinorImageVersion = v_uint16()
self.MajorSubsystemVersion = v_uint16()
self.MinorSubsystemVersion = v_uint16()
self.Win32VersionValue = v_uint32()
self.SizeOfImage = v_uint32()
self.SizeOfHeaders = v_uint32()
self.CheckSum = v_uint32()
self.Subsystem = v_uint16()
self.DllCharacteristics = v_uint16()
self.SizeOfStackReserve = v_uint32()
self.SizeOfStackCommit = v_uint32()
self.SizeOfHeapReserve = v_uint32()
self.SizeOfHeapCommit = v_uint32()
self.LoaderFlags = v_uint32()
self.NumberOfRvaAndSizes = v_uint32()
self.DataDirectory = vstruct.VArray([IMAGE_DATA_DIRECTORY() for i in range(16)])
class IMAGE_OPTIONAL_HEADER64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_bytes(2)
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.ImageBase = v_uint64()
self.SectionAlignment = v_uint32()
self.FileAlignment = v_uint32()
self.MajorOperatingSystemVersion = v_uint16()
self.MinorOperatingSystemVersion = v_uint16()
self.MajorImageVersion = v_uint16()
self.MinorImageVersion = v_uint16()
self.MajorSubsystemVersion = v_uint16()
self.MinorSubsystemVersion = v_uint16()
self.Win32VersionValue = v_uint32()
self.SizeOfImage = v_uint32()
self.SizeOfHeaders = v_uint32()
self.CheckSum = v_uint32()
self.Subsystem = v_uint16()
self.DllCharacteristics = v_uint16()
self.SizeOfStackReserve = v_uint64()
self.SizeOfStackCommit = v_uint64()
self.SizeOfHeapReserve = v_uint64()
self.SizeOfHeapCommit = v_uint64()
self.LoaderFlags = v_uint32()
self.NumberOfRvaAndSizes = v_uint32()
self.DataDirectory = vstruct.VArray([IMAGE_DATA_DIRECTORY() for i in range(16)])
class IMAGE_RESOURCE_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Characteristics = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.NumberOfNamedEntries = v_uint16()
self.NumberOfIdEntries = v_uint16()
class IMAGE_RESOURCE_DIRECTORY_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = v_uint32()
self.OffsetToData = v_uint32()
class IMAGE_RESOURCE_DATA_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OffsetToData = v_uint32()
self.Size = v_uint32()
self.CodePage = v_uint32()
self.Reserved = v_uint32()
class VS_FIXEDFILEINFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.StrucVersion = v_uint32()
self.FileVersionMS = v_uint32()
self.FileVersionLS = v_uint32()
self.ProductVersionMS = v_uint32()
self.ProductVersionLS = v_uint32()
self.FileFlagsMask = v_uint32()
self.FileFlags = v_uint32()
self.FileOS = v_uint32()
self.FileType = v_uint32()
self.FileSubtype = v_uint32()
self.FileDateMS = v_uint32()
self.FileDateLS = v_uint32()
class IMAGE_SECTION_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = v_str(8)
self.VirtualSize = v_uint32()
self.VirtualAddress = v_uint32()
self.SizeOfRawData = v_uint32()
self.PointerToRawData = v_uint32()
self.PointerToRelocations = v_uint32()
self.PointerToLineNumbers = v_uint32()
self.NumberOfRelocations = v_uint16()
self.NumberOfLineNumbers = v_uint16()
self.Characteristics = v_uint32()
class IMAGE_RUNTIME_FUNCTION_ENTRY(vstruct.VStruct):
"""
Used in the .pdata section of a PE32+ for all non
leaf functions.
"""
def __init__(self):
vstruct.VStruct.__init__(self)
self.BeginAddress = v_uint32()
self.EndAddress = v_uint32()
self.UnwindInfoAddress = v_uint32()
| foreni-administrator/pyew | vstruct/defs/pe.py | Python | gpl-2.0 | 10,708 |
'''Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to a automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect('Password:')
child.sendline(mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
http://pexpect.sourceforge.net/
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import sys
PY3 = (sys.version_info[0] >= 3)
from .exceptions import ExceptionPexpect, EOF, TIMEOUT
from .utils import split_command_line, which, is_executable_file
from .expect import Expecter, searcher_re, searcher_string
if sys.platform != 'win32':
# On Unix, these are available at the top level for backwards compatibility
from .pty_spawn import spawn, spawnu
from .run import run, runu
__version__ = '4.6.0'
__revision__ = ''
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
'which', 'split_command_line', '__version__', '__revision__']
# vim: set shiftround expandtab tabstop=4 shiftwidth=4 ft=python autoindent :
| endlessm/chromium-browser | third_party/llvm/lldb/third_party/Python/module/pexpect-4.6/pexpect/__init__.py | Python | bsd-3-clause | 3,902 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for stevedore.example.simple
"""
from stevedore.example import simple
from stevedore.tests import utils
class TestExampleSimple(utils.TestCase):
def test_simple_items(self):
f = simple.Simple(100)
text = ''.join(f.format({'a': 'A', 'b': 'B'}))
expected = '\n'.join([
'a = A',
'b = B',
'',
])
self.assertEqual(text, expected)
| ctrlaltdel/neutrinator | vendor/stevedore/tests/test_example_simple.py | Python | gpl-3.0 | 972 |
# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as qexception
from neutron.extensions import providernet as pnet
SEGMENTS = 'segments'
class SegmentsSetInConjunctionWithProviders(qexception.InvalidInput):
message = _("Segments and provider values cannot both be set.")
class SegmentsContainDuplicateEntry(qexception.InvalidInput):
message = _("Duplicate segment entry in request.")
def _convert_and_validate_segments(segments, valid_values=None):
for segment in segments:
segment.setdefault(pnet.NETWORK_TYPE, attr.ATTR_NOT_SPECIFIED)
segment.setdefault(pnet.PHYSICAL_NETWORK, attr.ATTR_NOT_SPECIFIED)
segmentation_id = segment.get(pnet.SEGMENTATION_ID)
if segmentation_id:
segment[pnet.SEGMENTATION_ID] = attr.convert_to_int(
segmentation_id)
else:
segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED
if len(segment.keys()) != 3:
msg = (_("Unrecognized attribute(s) '%s'") %
', '.join(set(segment.keys()) -
set([pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID])))
raise webob.exc.HTTPBadRequest(msg)
def check_duplicate_segments(segments, is_partial_func=None):
"""Helper function checking duplicate segments.
If is_partial_funcs is specified and not None, then
SegmentsContainDuplicateEntry is raised if two segments are identical and
non partially defined (is_partial_func(segment) == False).
Otherwise SegmentsContainDuplicateEntry is raised if two segment are
identical.
"""
if is_partial_func is not None:
segments = [s for s in segments if not is_partial_func(s)]
fully_specifieds = [tuple(sorted(s.items())) for s in segments]
if len(set(fully_specifieds)) != len(fully_specifieds):
raise SegmentsContainDuplicateEntry()
attr.validators['type:convert_segments'] = (
_convert_and_validate_segments)
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {
SEGMENTS: {'allow_post': True, 'allow_put': True,
'validate': {'type:convert_segments': None},
'convert_list_to': attr.convert_kvp_list_to_dict,
'default': attr.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
}
}
class Multiprovidernet(extensions.ExtensionDescriptor):
"""Extension class supporting multiple provider networks.
This class is used by neutron's extension framework to make
metadata about the multiple provider network extension available to
clients. No new resources are defined by this extension. Instead,
the existing network resource's request and response messages are
extended with 'segments' attribute.
With admin rights, network dictionaries returned will also include
'segments' attribute.
"""
@classmethod
def get_name(cls):
return "Multi Provider Network"
@classmethod
def get_alias(cls):
return "multi-provider"
@classmethod
def get_description(cls):
return ("Expose mapping of virtual networks to multiple physical "
"networks")
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/multi-provider/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-06-27T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| virtualopensystems/neutron | neutron/extensions/multiprovidernet.py | Python | apache-2.0 | 4,327 |
import Skype4Py
if __name__ == '__main__':
skype = Skype4Py.Skype()
skype.FriendlyName = 'main'
skype.Attach()
print 'Your Skypename:'
print ' ', skype.CurrentUserHandle
print 'Your contacts:'
for user in skype.Friends:
print ' ', user.Handle
| FloatingGhost/skype4py | examples/py2exe/main.py | Python | bsd-3-clause | 294 |
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_iscsi
short_description: NetApp ONTAP manage iSCSI service
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- create, delete, start, stop iSCSI service on SVM.
options:
state:
description:
- Whether the service should be present or deleted.
choices: ['present', 'absent']
default: present
service_state:
description:
- Whether the specified service should running .
choices: ['started', 'stopped']
vserver:
required: true
description:
- The name of the vserver to use.
'''
EXAMPLES = """
- name: Create iscsi service
na_ontap_iscsi:
state: present
service_state: started
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Stop Iscsi service
na_ontap_iscsi:
state: present
service_state: stopped
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete Iscsi service
na_ontap_iscsi:
state: absent
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapISCSI(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=[
'present', 'absent'], default='present'),
service_state=dict(required=False, choices=[
'started', 'stopped'], default=None),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
params = self.module.params
# set up state variables
self.state = params['state']
self.service_state = params['service_state']
if self.state == 'present' and self.service_state is None:
self.service_state = 'started'
self.vserver = params['vserver']
self.is_started = None
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(
module=self.module, vserver=self.vserver)
def get_iscsi(self):
"""
Return details about the iscsi service
:return: Details about the iscsi service
:rtype: dict
"""
iscsi_info = netapp_utils.zapi.NaElement('iscsi-service-get-iter')
iscsi_attributes = netapp_utils.zapi.NaElement('iscsi-service-info')
iscsi_attributes.add_new_child('vserver', self.vserver)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(iscsi_attributes)
iscsi_info.add_child_elem(query)
result = self.server.invoke_successfully(iscsi_info, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
iscsi = result.get_child_by_name(
'attributes-list').get_child_by_name('iscsi-service-info')
if iscsi:
is_started = iscsi.get_child_content('is-available') == 'true'
return_value = {
'is_started': is_started
}
return return_value
def create_iscsi_service(self):
"""
Create iscsi service and start if requested
"""
iscsi_service = netapp_utils.zapi.NaElement.create_node_with_children(
'iscsi-service-create',
**{'start': 'true' if self.state == 'started' else 'false'
})
try:
self.server.invoke_successfully(
iscsi_service, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error creating iscsi service: % s"
% (to_native(e)),
exception=traceback.format_exc())
def delete_iscsi_service(self):
"""
Delete the iscsi service
"""
if self.is_started:
self.stop_iscsi_service()
iscsi_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'iscsi-service-destroy')
try:
self.server.invoke_successfully(
iscsi_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error deleting iscsi service \
on vserver %s: %s"
% (self.vserver, to_native(e)),
exception=traceback.format_exc())
def stop_iscsi_service(self):
"""
Stop iscsi service
"""
iscsi_stop = netapp_utils.zapi.NaElement.create_node_with_children(
'iscsi-service-stop')
try:
self.server.invoke_successfully(iscsi_stop, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error Stopping iscsi service \
on vserver %s: %s"
% (self.vserver, to_native(e)),
exception=traceback.format_exc())
def start_iscsi_service(self):
"""
Start iscsi service
"""
iscsi_start = netapp_utils.zapi.NaElement.create_node_with_children(
'iscsi-service-start')
try:
self.server.invoke_successfully(iscsi_start, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error starting iscsi service \
on vserver %s: %s"
% (self.vserver, to_native(e)),
exception=traceback.format_exc())
def apply(self):
property_changed = False
iscsi_service_exists = False
netapp_utils.ems_log_event("na_ontap_iscsi", self.server)
iscsi_service_detail = self.get_iscsi()
if iscsi_service_detail:
self.is_started = iscsi_service_detail['is_started']
iscsi_service_exists = True
if self.state == 'absent':
property_changed = True
elif self.state == 'present':
is_started = 'started' if self.is_started else 'stopped'
property_changed = is_started != self.service_state
else:
if self.state == 'present':
property_changed = True
if property_changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not iscsi_service_exists:
self.create_iscsi_service()
elif self.service_state == 'started':
self.start_iscsi_service()
else:
self.stop_iscsi_service()
elif self.state == 'absent':
self.delete_iscsi_service()
changed = property_changed
# TODO: include other details about the lun (size, etc.)
self.module.exit_json(changed=changed)
def main():
v = NetAppOntapISCSI()
v.apply()
if __name__ == '__main__':
main()
| Jorge-Rodriguez/ansible | lib/ansible/modules/storage/netapp/na_ontap_iscsi.py | Python | gpl-3.0 | 8,339 |
"""
kombu.serialization
===================
Serialization utilities.
"""
from __future__ import absolute_import
import codecs
import os
import sys
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError: # pragma: no cover
cpickle = None # noqa
from collections import namedtuple
from contextlib import contextmanager
from .exceptions import (
ContentDisallowed, DecodeError, EncodeError, SerializerNotInstalled
)
from .five import BytesIO, reraise, text_t
from .utils import entrypoints
from .utils.encoding import str_to_bytes, bytes_t
__all__ = ['pickle', 'loads', 'dumps', 'register', 'unregister']
SKIP_DECODE = frozenset(['binary', 'ascii-8bit'])
TRUSTED_CONTENT = frozenset(['application/data', 'application/text'])
if sys.platform.startswith('java'): # pragma: no cover
def _decode(t, coding):
return codecs.getdecoder(coding)(t)[0]
else:
_decode = codecs.decode
pickle = cpickle or pypickle
pickle_load = pickle.load
#: Kombu requires Python 2.5 or later so we use protocol 2 by default.
#: There's a new protocol (3) but this is only supported by Python 3.
pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 2))
codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder'))
@contextmanager
def _reraise_errors(wrapper,
include=(Exception, ), exclude=(SerializerNotInstalled, )):
try:
yield
except exclude:
raise
except include as exc:
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
def pickle_loads(s, load=pickle_load):
# used to support buffer objects
return load(BytesIO(s))
def parenthesize_alias(first, second):
return '%s (%s)' % (first, second) if first else second
class SerializerRegistry(object):
"""The registry keeps track of serialization methods."""
def __init__(self):
self._encoders = {}
self._decoders = {}
self._default_encode = None
self._default_content_type = None
self._default_content_encoding = None
self._disabled_content_types = set()
self.type_to_name = {}
self.name_to_type = {}
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
if encoder:
self._encoders[name] = codec(
content_type, content_encoding, encoder,
)
if decoder:
self._decoders[content_type] = decoder
self.type_to_name[content_type] = name
self.name_to_type[name] = content_type
def enable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.discard(name)
def disable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.add(name)
def unregister(self, name):
try:
content_type = self.name_to_type[name]
self._decoders.pop(content_type, None)
self._encoders.pop(name, None)
self.type_to_name.pop(content_type, None)
self.name_to_type.pop(name, None)
except KeyError:
raise SerializerNotInstalled(
'No encoder/decoder installed for {0}'.format(name))
def _set_default_serializer(self, name):
"""
Set the default serialization method used by this library.
:param name: The name of the registered serialization method.
For example, `json` (default), `pickle`, `yaml`, `msgpack`,
or any custom methods registered using :meth:`register`.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
'No encoder installed for {0}'.format(name))
def dumps(self, data, serializer=None):
if serializer == 'raw':
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
'No encoder installed for {0}'.format(serializer))
# If a raw string was sent, assume binary encoding
# (it's likely either ASCII or a raw binary file, and a character
# set of 'binary' will encompass both, even if not ideal.
if not serializer and isinstance(data, bytes_t):
# In Python 3+, this would be "bytes"; allow binary data to be
# sent as a message without getting encoder errors
return 'application/data', 'binary', data
# For Unicode objects, force it into a string
if not serializer and isinstance(data, text_t):
with _reraise_errors(EncodeError, exclude=()):
payload = data.encode('utf-8')
return 'text/plain', 'utf-8', payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
with _reraise_errors(EncodeError):
payload = encoder(data)
return content_type, content_encoding, payload
encode = dumps # XXX compat
def loads(self, data, content_type, content_encoding,
accept=None, force=False, _trusted_content=TRUSTED_CONTENT):
content_type = content_type or 'application/data'
if accept is not None:
if content_type not in _trusted_content \
and content_type not in accept:
raise self._for_untrusted_content(content_type, 'untrusted')
else:
if content_type in self._disabled_content_types and not force:
raise self._for_untrusted_content(content_type, 'disabled')
content_encoding = (content_encoding or 'utf-8').lower()
if data:
decode = self._decoders.get(content_type)
if decode:
with _reraise_errors(DecodeError):
return decode(data)
if content_encoding not in SKIP_DECODE and \
not isinstance(data, text_t):
with _reraise_errors(DecodeError):
return _decode(data, content_encoding)
return data
decode = loads # XXX compat
def _for_untrusted_content(self, ctype, why):
return ContentDisallowed(
'Refusing to deserialize {0} content of type {1}'.format(
why,
parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype),
),
)
#: Global registry of serializers/deserializers.
registry = SerializerRegistry()
"""
.. function:: dumps(data, serializer=default_serializer)
Serialize a data structure into a string suitable for sending
as an AMQP message body.
:param data: The message data to send. Can be a list,
dictionary or a string.
:keyword serializer: An optional string representing
the serialization method you want the data marshalled
into. (For example, `json`, `raw`, or `pickle`).
If :const:`None` (default), then json will be used, unless
`data` is a :class:`str` or :class:`unicode` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if `serializer` is specified, then that
serialization method will be used even if a :class:`str`
or :class:`unicode` object is passed in.
:returns: A three-item tuple containing the content type
(e.g., `application/json`), content encoding, (e.g.,
`utf-8`) and a string containing the serialized
data.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
dumps = encode = registry.encode # XXX encode is a compat alias
"""
.. function:: loads(data, content_type, content_encoding):
Deserialize a data stream as serialized using `dumps`
based on `content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data.
(e.g., `application/json`).
:param content_encoding: The content-encoding of the data.
(e.g., `utf-8`, `binary`, or `us-ascii`).
:returns: The unserialized data.
"""
loads = decode = registry.decode # XXX decode is a compat alias
"""
.. function:: register(name, encoder, decoder, content_type,
content_encoding='utf-8'):
Register a new encoder/decoder.
:param name: A convenience name for the serialization method.
:param encoder: A method that will be passed a python data structure
and should return a string representing the serialized data.
If :const:`None`, then only a decoder will be registered. Encoding
will not be possible.
:param decoder: A method that will be passed a string representing
serialized data and should return a python data structure.
If :const:`None`, then only an encoder will be registered.
Decoding will not be possible.
:param content_type: The mime-type describing the serialized
structure.
:param content_encoding: The content encoding (character set) that
the `decoder` method will be returning. Will usually be
`utf-8`, `us-ascii`, or `binary`.
"""
register = registry.register
"""
.. function:: unregister(name):
Unregister registered encoder/decoder.
:param name: Registered serialization method name.
"""
unregister = registry.unregister
def raw_encode(data):
"""Special case serializer."""
content_type = 'application/data'
payload = data
if isinstance(payload, text_t):
content_encoding = 'utf-8'
with _reraise_errors(EncodeError, exclude=()):
payload = payload.encode(content_encoding)
else:
content_encoding = 'binary'
return content_type, content_encoding, payload
def register_json():
"""Register a encoder/decoder for JSON serialization."""
from anyjson import loads as json_loads, dumps as json_dumps
def _loads(obj):
if isinstance(obj, bytes_t):
obj = obj.decode()
return json_loads(obj)
registry.register('json', json_dumps, _loads,
content_type='application/json',
content_encoding='utf-8')
def register_yaml():
"""Register a encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates"""
try:
import yaml
registry.register('yaml', yaml.safe_dump, yaml.safe_load,
content_type='application/x-yaml',
content_encoding='utf-8')
except ImportError:
def not_available(*args, **kwargs):
"""In case a client receives a yaml message, but yaml
isn't installed."""
raise SerializerNotInstalled(
'No decoder installed for YAML. Install the PyYAML library')
registry.register('yaml', None, not_available, 'application/x-yaml')
if sys.version_info[0] == 3: # pragma: no cover
def unpickle(s):
return pickle_loads(str_to_bytes(s))
else:
unpickle = pickle_loads # noqa
def register_pickle():
"""The fastest serialization method, but restricts
you to python clients."""
def pickle_dumps(obj, dumper=pickle.dumps):
return dumper(obj, protocol=pickle_protocol)
registry.register('pickle', pickle_dumps, unpickle,
content_type='application/x-python-serialize',
content_encoding='binary')
def register_msgpack():
"""See http://msgpack.sourceforge.net/"""
try:
try:
from msgpack import packb as pack, unpackb
unpack = lambda s: unpackb(s, encoding='utf-8')
except ImportError:
# msgpack < 0.2.0 and Python 2.5
from msgpack import packs as pack, unpacks as unpack # noqa
registry.register(
'msgpack', pack, unpack,
content_type='application/x-msgpack',
content_encoding='binary')
except (ImportError, ValueError):
def not_available(*args, **kwargs):
"""In case a client receives a msgpack message, but yaml
isn't installed."""
raise SerializerNotInstalled(
'No decoder installed for msgpack. '
'Please install the msgpack library')
registry.register('msgpack', None, not_available,
'application/x-msgpack')
# Register the base serialization methods.
register_json()
register_pickle()
register_yaml()
register_msgpack()
# Default serializer is 'json'
registry._set_default_serializer('json')
_setupfuns = {
'json': register_json,
'pickle': register_pickle,
'yaml': register_yaml,
'msgpack': register_msgpack,
'application/json': register_json,
'application/x-yaml': register_yaml,
'application/x-python-serialize': register_pickle,
'application/x-msgpack': register_msgpack,
}
def enable_insecure_serializers(choices=['pickle', 'yaml', 'msgpack']):
"""Enable serializers that are considered to be unsafe.
Will enable ``pickle``, ``yaml`` and ``msgpack`` by default,
but you can also specify a list of serializers (by name or content type)
to enable.
"""
for choice in choices:
try:
registry.enable(choice)
except KeyError:
pass
def disable_insecure_serializers(allowed=['json']):
"""Disable untrusted serializers.
Will disable all serializers except ``json``
or you can specify a list of deserializers to allow.
.. note::
Producers will still be able to serialize data
in these formats, but consumers will not accept
incoming data using the untrusted content types.
"""
for name in registry._decoders:
registry.disable(name)
if allowed is not None:
for name in allowed:
registry.enable(name)
# Insecure serializers are disabled by default since v3.0
disable_insecure_serializers()
# Load entrypoints from installed extensions
for ep, args in entrypoints('kombu.serializers'): # pragma: no cover
register(ep.name, *args)
def prepare_accept_content(l, name_to_type=registry.name_to_type):
if l is not None:
return set(n if '/' in n else name_to_type[n] for n in l)
return l
| nirmeshk/oh-mainline | vendor/packages/kombu/kombu/serialization.py | Python | agpl-3.0 | 14,799 |
"""
A Marketplace only command that finds apps missing from the search index and
adds them.
"""
import sys
import elasticsearch
from django.core.management.base import BaseCommand
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
class Command(BaseCommand):
help = 'Fix up Marketplace index.'
def handle(self, *args, **kwargs):
index = WebappIndexer.get_index()
doctype = WebappIndexer.get_mapping_type_name()
es = WebappIndexer.get_es()
app_ids = Webapp.objects.values_list('id', flat=True)
missing_ids = []
for app_id in app_ids:
try:
es.get(index, app_id, doctype, fields='id')
except elasticsearch.NotFoundError:
# App doesn't exist in our index, add it to `missing_ids`.
missing_ids.append(app_id)
if missing_ids:
sys.stdout.write('Adding %s doc(s) to the index.'
% len(missing_ids))
WebappIndexer().run_indexing(missing_ids, es)
else:
sys.stdout.write('No docs missing from index.')
| mozilla/zamboni | lib/es/management/commands/fixup_mkt_index.py | Python | bsd-3-clause | 1,143 |
import logging
import platform
import os
from twilio.exceptions import TwilioException
from twilio.rest.resources import Connection
from twilio.rest.resources import UNSET_TIMEOUT
from twilio.rest.resources import make_request
from twilio.version import __version__ as LIBRARY_VERSION
def find_credentials(environ=None):
"""
Look in the current environment for Twilio credentials
:param environ: the environment to check
"""
environment = environ or os.environ
try:
account = environment["TWILIO_ACCOUNT_SID"]
token = environment["TWILIO_AUTH_TOKEN"]
return account, token
except KeyError:
return None, None
def set_twilio_proxy(proxy_url, proxy_port):
Connection.set_proxy_info(proxy_url, proxy_port)
class TwilioClient(object):
def __init__(self, account=None, token=None, base="https://api.twilio.com",
version="2010-04-01", timeout=UNSET_TIMEOUT,
request_account=None):
"""
Create a Twilio API client.
"""
# Get account credentials
if not account or not token:
account, token = find_credentials()
if not account or not token:
raise TwilioException("""
Twilio could not find your account credentials. Pass them into the
TwilioRestClient constructor like this:
client = TwilioRestClient(account='AC38135355602040856210245275870',
token='2flnf5tdp7so0lmfdu3d')
Or, add your credentials to your shell environment. From the terminal, run
echo "export TWILIO_ACCOUNT_SID=AC3813535560204085626521" >> ~/.bashrc
echo "export TWILIO_AUTH_TOKEN=2flnf5tdp7so0lmfdu3d7wod" >> ~/.bashrc
and be sure to replace the values for the Account SID and auth token with the
values from your Twilio Account at https://www.twilio.com/user/account.
""")
self.base = base
self.auth = (account, token)
self.timeout = timeout
req_account = request_account if request_account else account
self.account_uri = "{0}/{1}/Accounts/{2}".format(base,
version, req_account)
def request(self, path, method=None, vars=None):
"""sends a request and gets a response from the Twilio REST API
.. deprecated:: 3.0
:param path: the URL (relative to the endpoint URL, after the /v1
:param url: the HTTP method to use, defaults to POST
:param vars: for POST or PUT, a dict of data to send
:returns: Twilio response in XML or raises an exception on error
:raises: a :exc:`ValueError` if the path is invalid
:raises: a :exc:`NotImplementedError` if the method is unknown
This method is only included for backwards compatability reasons.
It will be removed in a future version
"""
logging.warning(":meth:`TwilioRestClient.request` is deprecated and "
"will be removed in a future version")
vars = vars or {}
params = None
data = None
if not path or len(path) < 1:
raise ValueError('Invalid path parameter')
if method and method not in ['GET', 'POST', 'DELETE', 'PUT']:
raise NotImplementedError(
'HTTP %s method not implemented' % method)
if path[0] == '/':
uri = self.base + path
else:
uri = self.base + '/' + path
if method == "GET":
params = vars
elif method == "POST" or method == "PUT":
data = vars
user_agent = "twilio-python %s (python-%s)" % (
LIBRARY_VERSION,
platform.python_version(),
)
headers = {
"User-Agent": user_agent,
"Accept-Charset": "utf-8",
}
resp = make_request(method, uri, auth=self.auth, data=data,
params=params, headers=headers)
return resp.content
| kramwens/order_bot | venv/lib/python2.7/site-packages/twilio/rest/base.py | Python | mit | 3,986 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lxml import etree
from translate.storage import aresource, test_monolingual
from translate.misc.multistring import multistring
from translate.storage.base import TranslationStore
class TestAndroidResourceUnit(test_monolingual.TestMonolingualUnit):
UnitClass = aresource.AndroidResourceUnit
def __check_escape(self, string, xml, target_language=None):
"""Helper that checks that a string is output with the right escape."""
unit = self.UnitClass("Test String")
if (target_language is not None):
store = TranslationStore()
store.settargetlanguage(target_language)
unit._store = store
unit.target = string
print("unit.target:", repr(unit.target))
print("xml:", repr(xml))
assert str(unit) == xml
def __check_parse(self, string, xml):
"""Helper that checks that a string is parsed correctly."""
parser = etree.XMLParser(strip_cdata=False)
translatable = 'translatable="false"' not in xml
et = etree.fromstring(xml, parser)
unit = self.UnitClass.createfromxmlElement(et)
print("unit.target:", repr(unit.target))
print("string:", string)
print("translatable:", repr(unit.istranslatable()))
assert unit.target == string
assert unit.istranslatable() == translatable
############################ Check string escape ##########################
def test_escape_message_with_newline(self):
string = 'message\nwith newline'
xml = '<string name="Test String">message\\nwith newline</string>\n\n'
self.__check_escape(string, xml)
def test_escape_message_with_newline_in_xml(self):
string = 'message \nwith newline in xml'
xml = ('<string name="Test String">message\n\\nwith newline in xml'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_twitter(self):
string = '@twitterescape'
xml = '<string name="Test String">\\@twitterescape</string>\n\n'
self.__check_escape(string, xml)
def test_escape_quote(self):
string = 'quote \'escape\''
xml = '<string name="Test String">quote \\\'escape\\\'</string>\n\n'
self.__check_escape(string, xml)
def test_escape_double_space(self):
string = 'double space'
xml = '<string name="Test String">"double space"</string>\n\n'
self.__check_escape(string, xml)
def test_escape_leading_space(self):
string = ' leading space'
xml = '<string name="Test String">" leading space"</string>\n\n'
self.__check_escape(string, xml)
def test_escape_trailing_space(self):
string = 'leading space '
xml = '<string name="Test String">"leading space "</string>\n\n'
self.__check_escape(string, xml)
def test_escape_xml_entities(self):
string = '>xml&entities'
xml = '<string name="Test String">>xml&entities</string>\n\n'
self.__check_escape(string, xml)
def test_escape_html_code(self):
string = 'some <b>html code</b> here'
xml = ('<string name="Test String">some <b>html code</b> here'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_code_quote(self):
string = 'some <b>html code</b> \'here\''
xml = ('<string name="Test String">some <b>html code</b> \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_arrows(self):
string = '<<< arrow'
xml = '<string name="Test String"><<< arrow</string>\n\n'
self.__check_escape(string, xml)
def test_escape_link(self):
string = '<a href="http://example.net">link</a>'
xml = ('<string name="Test String">\n'
' <a href="http://example.net">link</a>\n'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_link_and_text(self):
string = '<a href="http://example.net">link</a> and text'
xml = ('<string name="Test String"><a href="http://example.net">link'
'</a> and text</string>\n\n')
self.__check_escape(string, xml)
def test_escape_blank_string(self):
string = ''
xml = '<string name="Test String"></string>\n\n'
self.__check_escape(string, xml)
def test_plural_escape_message_with_newline(self):
mString = multistring(['one message\nwith newline', 'other message\nwith newline'])
xml = ('<plurals name="Test String">\n\t'
'<item quantity="one">one message\\nwith newline</item>\n\t'
'<item quantity="other">other message\\nwith newline</item>\n'
'</plurals>\n\n')
self.__check_escape(mString, xml, 'en')
def test_escape_html_quote(self):
string = 'start \'here\' <b>html code \'to escape\'</b> also \'here\''
xml = ('<string name="Test String">start \\\'here\\\' <b>html code \\\'to escape\\\'</b> also \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_leading_space(self):
string = ' <b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="Test String"> <b>html code \\\'to escape\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_trailing_space(self):
string = '<b>html code \'to escape\'</b> some \'here\' '
xml = ('<string name="Test String"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' '
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_with_ampersand(self):
string = '<b>html code \'to escape\'</b> some \'here\' with & char'
xml = ('<string name="Test String"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' with & char'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_double_space(self):
string = '<b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="Test String"><b>"html code \\\'to escape\\\'"</b> some \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_deep_double_space(self):
string = '<b>html code \'to <i>escape</i>\'</b> some \'here\''
xml = ('<string name="Test String"><b>"html code \\\'to "<i>escape</i>\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_complex_xml(self):
string = '<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> & outer > <br/>text'
xml = ('<string name="Test String">'
'<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> & outer > <br/>text'
'</string>\n\n')
self.__check_escape(string, xml)
############################ Check string parse ###########################
def test_parse_message_with_newline(self):
string = 'message\nwith newline'
xml = '<string name="Test String">message\\nwith newline</string>\n\n'
self.__check_parse(string, xml)
def test_parse_message_with_newline_in_xml(self):
string = 'message \nwith\n newline\nin xml'
xml = ('<string name="Test String">message\n\\nwith\\n\nnewline\\n\\\nin xml'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_twitter(self):
string = '@twitterescape'
xml = '<string name="Test String">\\@twitterescape</string>\n\n'
self.__check_parse(string, xml)
def test_parse_quote(self):
string = 'quote \'escape\''
xml = '<string name="Test String">quote \\\'escape\\\'</string>\n\n'
self.__check_parse(string, xml)
def test_parse_double_space(self):
string = 'double space'
xml = '<string name="Test String">"double space"</string>\n\n'
self.__check_parse(string, xml)
def test_parse_leading_space(self):
string = ' leading space'
xml = '<string name="Test String">" leading space"</string>\n\n'
self.__check_parse(string, xml)
def test_parse_xml_entities(self):
string = '>xml&entities'
xml = '<string name="Test String">>xml&entities</string>\n\n'
self.__check_parse(string, xml)
def test_parse_html_code(self):
string = 'some <b>html code</b> here'
xml = ('<string name="Test String">some <b>html code</b> here'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_arrows(self):
string = '<<< arrow'
xml = '<string name="Test String"><<< arrow</string>\n\n'
self.__check_parse(string, xml)
def test_parse_link(self):
string = '<a href="http://example.net">link</a>'
xml = ('<string name="Test String"><a href="http://example.net">link'
'</a></string>\n\n')
self.__check_parse(string, xml)
def test_parse_link_and_text(self):
string = '<a href="http://example.net">link</a> and text'
xml = ('<string name="Test String"><a href="http://example.net">link'
'</a> and text</string>\n\n')
self.__check_parse(string, xml)
def test_parse_blank_string(self):
string = ''
xml = '<string name="Test String"></string>\n\n'
self.__check_parse(string, xml)
def test_parse_blank_string_again(self):
string = ''
xml = '<string name="Test String"/>\n\n'
self.__check_parse(string, xml)
def test_parse_double_quotes_string(self):
"""Check that double quotes got removed."""
string = 'double quoted text'
xml = '<string name="Test String">"double quoted text"</string>\n\n'
self.__check_parse(string, xml)
def test_parse_newline_in_string(self):
"""Check that newline is read as space.
At least it seems to be what Android does.
"""
string = 'newline in string'
xml = '<string name="Test String">newline\nin string</string>\n\n'
self.__check_parse(string, xml)
def test_parse_not_translatable_string(self):
string = 'string'
xml = ('<string name="Test String" translatable="false">string'
'</string>\n\n')
self.__check_parse(string, xml)
def test_plural_parse_message_with_newline(self):
mString = multistring(['one message\nwith newline', 'other message\nwith newline'])
xml = ('<plurals name="Test String">\n\t'
'<item quantity="one">one message\\nwith newline</item>\n\t'
'<item quantity="other">other message\\nwith newline</item>\n\n'
'</plurals>\n\n')
self.__check_parse(mString, xml)
def test_parse_html_quote(self):
string = 'start \'here\' <b>html code \'to escape\'</b> also \'here\''
xml = ('<string name="Test String">start \\\'here\\\' <b>html code \\\'to escape\\\'</b> also \\\'here\\\''
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_leading_space(self):
string = ' <b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="Test String"> <b>html code \\\'to escape\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_leading_space_quoted(self):
string = ' <b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="Test String">" "<b>"html code \'to escape\'"</b>" some \'here\'"'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_trailing_space(self):
string = '<b>html code \'to escape\'</b> some \'here\' '
xml = ('<string name="Test String"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' '
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_trailing_space_quoted(self):
string = '<b>html code \'to escape\'</b> some \'here\' '
xml = ('<string name="Test String"><b>"html code \'to escape\'"</b>" some \'here\' "'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_with_ampersand(self):
string = '<b>html code \'to escape\'</b> some \'here\' with & char'
xml = ('<string name="Test String"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' with & char'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_double_space_quoted(self):
string = '<b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="Test String"><b>"html code \'to escape\'"</b>" some \'here\'"'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_deep_double_space_quoted(self):
string = '<b>html code \'to <i> escape</i>\'</b> some \'here\''
xml = ('<string name="Test String"><b>"html code \'to "<i>" escape"</i>\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_complex_xml(self):
string = '<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> outer & text'
xml = ('<string name="Test String">'
'<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> outer & text'
'</string>\n\n')
self.__check_parse(string, xml)
class TestAndroidResourceFile(test_monolingual.TestMonolingualStore):
StoreClass = aresource.AndroidResourceFile
def test_targetlanguage_default_handlings(self):
store = self.StoreClass()
# Initial value is None
assert store.gettargetlanguage() is None
# sourcelanguage shouldn't change the targetlanguage
store.setsourcelanguage('en')
assert store.gettargetlanguage() is None
# targetlanguage setter works correctly
store.settargetlanguage('de')
assert store.gettargetlanguage() == 'de'
# explicit targetlanguage wins over filename
store.filename = 'dommy/values-it/res.xml'
assert store.gettargetlanguage() == 'de'
def test_targetlanguage_auto_detection_filename(self):
store = self.StoreClass()
# Check language auto_detection
store.filename = 'project/values-it/res.xml'
assert store.gettargetlanguage() == 'it'
def test_targetlanguage_auto_detection_filename_default_language(self):
store = self.StoreClass()
store.setsourcelanguage('en')
# Check language auto_detection
store.filename = 'project/values/res.xml'
assert store.gettargetlanguage() == 'en'
def test_targetlanguage_auto_detection_invalid_filename(self):
store = self.StoreClass()
store.setsourcelanguage('en')
store.filename = 'project/invalid_directory/res.xml'
assert store.gettargetlanguage() is None
store.filename = 'invalid_directory'
assert store.gettargetlanguage() is None
| bluemini/kuma | vendor/packages/translate/storage/test_aresource.py | Python | mpl-2.0 | 15,276 |
""" A sparse matrix in COOrdinate or 'triplet' format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['coo_matrix', 'isspmatrix_coo']
from warnings import warn
import numpy as np
from scipy._lib.six import xrange, zip as izip
from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
from .base import isspmatrix
from .data import _data_matrix, _minmax_mixin
from .sputils import (upcast, upcast_char, to_native, isshape, getdtype,
isintlike, get_index_dtype, downcast_intp_index)
class coo_matrix(_data_matrix, _minmax_mixin):
"""
A sparse matrix in COOrdinate format.
Also known as the 'ijv' or 'triplet' format.
This can be instantiated in several ways:
coo_matrix(D)
with a dense matrix D
coo_matrix(S)
with another sparse matrix S (equivalent to S.tocoo())
coo_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
coo_matrix((data, (i, j)), [shape=(M, N)])
to construct from three arrays:
1. data[:] the entries of the matrix, in any order
2. i[:] the row indices of the matrix entries
3. j[:] the column indices of the matrix entries
Where ``A[i[k], j[k]] = data[k]``. When shape is not
specified, it is inferred from the index arrays
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
COO format data array of the matrix
row
COO format row index array of the matrix
col
COO format column index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the COO format
- facilitates fast conversion among sparse formats
- permits duplicate entries (see example)
- very fast conversion to and from CSR/CSC formats
Disadvantages of the COO format
- does not directly support:
+ arithmetic operations
+ slicing
Intended Usage
- COO is a fast format for constructing sparse matrices
- Once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- By default when converting to CSR or CSC format, duplicate (i,j)
entries will be summed together. This facilitates efficient
construction of finite element matrices and the like. (see example)
Examples
--------
>>> from scipy.sparse import coo_matrix
>>> coo_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 3, 1, 0])
>>> col = np.array([0, 3, 1, 2])
>>> data = np.array([4, 5, 7, 9])
>>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
>>> # example with duplicates
>>> row = np.array([0, 0, 1, 3, 1, 0, 0])
>>> col = np.array([0, 2, 1, 3, 1, 0, 0])
>>> data = np.array([1, 1, 1, 1, 1, 1, 1])
>>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isinstance(arg1, tuple):
if isshape(arg1):
M, N = arg1
self.shape = (M,N)
idx_dtype = get_index_dtype(maxval=max(M, N))
self.row = np.array([], dtype=idx_dtype)
self.col = np.array([], dtype=idx_dtype)
self.data = np.array([], getdtype(dtype, default=float))
self.has_canonical_format = True
else:
try:
obj, ij = arg1
except:
raise TypeError('invalid input format')
try:
if len(ij) != 2:
raise TypeError
except TypeError:
raise TypeError('invalid input format')
row, col = ij
if shape is None:
if len(row) == 0 or len(col) == 0:
raise ValueError('cannot infer dimensions from zero '
'sized index arrays')
M = np.max(row) + 1
N = np.max(col) + 1
self.shape = (M, N)
else:
# Use 2 steps to ensure shape has length 2.
M, N = shape
self.shape = (M, N)
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.row = np.array(row, copy=copy, dtype=idx_dtype)
self.col = np.array(col, copy=copy, dtype=idx_dtype)
self.data = np.array(obj, copy=copy)
self.has_canonical_format = False
elif arg1 is None:
# Initialize an empty matrix.
if not isinstance(shape, tuple) or not isintlike(shape[0]):
raise TypeError('dimensions not understood')
warn('coo_matrix(None, shape=(M,N)) is deprecated, '
'use coo_matrix( (M,N) ) instead', DeprecationWarning)
idx_dtype = get_index_dtype(maxval=max(M, N))
self.shape = shape
self.data = np.array([], getdtype(dtype, default=float))
self.row = np.array([], dtype=idx_dtype)
self.col = np.array([], dtype=idx_dtype)
self.has_canonical_format = True
else:
if isspmatrix(arg1):
if isspmatrix_coo(arg1) and copy:
self.row = arg1.row.copy()
self.col = arg1.col.copy()
self.data = arg1.data.copy()
self.shape = arg1.shape
else:
coo = arg1.tocoo()
self.row = coo.row
self.col = coo.col
self.data = coo.data
self.shape = coo.shape
self.has_canonical_format = False
else:
#dense argument
try:
M = np.atleast_2d(np.asarray(arg1))
except:
raise TypeError('invalid input format')
if M.ndim != 2:
raise TypeError('expected dimension <= 2 array or matrix')
else:
self.shape = M.shape
self.row, self.col = M.nonzero()
self.data = M[self.row, self.col]
self.has_canonical_format = True
if dtype is not None:
self.data = self.data.astype(dtype)
self._check()
def getnnz(self, axis=None):
"""Get the count of explicitly-stored values (nonzeros)
Parameters
----------
axis : None, 0, or 1
Select between the number of values across the whole matrix, in
each column, or in each row.
"""
if axis is None:
nnz = len(self.data)
if nnz != len(self.row) or nnz != len(self.col):
raise ValueError('row, column, and data array must all be the '
'same length')
if self.data.ndim != 1 or self.row.ndim != 1 or \
self.col.ndim != 1:
raise ValueError('row, column, and data arrays must be 1-D')
return int(nnz)
if axis < 0:
axis += 2
if axis == 0:
return np.bincount(downcast_intp_index(self.col),
minlength=self.shape[1])
elif axis == 1:
return np.bincount(downcast_intp_index(self.row),
minlength=self.shape[0])
else:
raise ValueError('axis out of bounds')
nnz = property(fget=getnnz)
def _check(self):
""" Checks data structure for consistency """
nnz = self.nnz
# index arrays should have integer data types
if self.row.dtype.kind != 'i':
warn("row index array has non-integer dtype (%s) "
% self.row.dtype.name)
if self.col.dtype.kind != 'i':
warn("col index array has non-integer dtype (%s) "
% self.col.dtype.name)
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.row = np.asarray(self.row, dtype=idx_dtype)
self.col = np.asarray(self.col, dtype=idx_dtype)
self.data = to_native(self.data)
if nnz > 0:
if self.row.max() >= self.shape[0]:
raise ValueError('row index exceeds matrix dimensions')
if self.col.max() >= self.shape[1]:
raise ValueError('column index exceeds matrix dimensions')
if self.row.min() < 0:
raise ValueError('negative row index found')
if self.col.min() < 0:
raise ValueError('negative column index found')
def transpose(self, copy=False):
M,N = self.shape
return coo_matrix((self.data, (self.col, self.row)), shape=(N,M), copy=copy)
def toarray(self, order=None, out=None):
"""See the docstring for `spmatrix.toarray`."""
B = self._process_toarray_args(order, out)
fortran = int(B.flags.f_contiguous)
if not fortran and not B.flags.c_contiguous:
raise ValueError("Output array must be C or F contiguous")
M,N = self.shape
coo_todense(M, N, self.nnz, self.row, self.col, self.data,
B.ravel('A'), fortran)
return B
def tocsc(self):
"""Return a copy of this matrix in Compressed Sparse Column format
Duplicate entries will be summed together.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0, 0, 1, 3, 1, 0, 0])
>>> col = array([0, 2, 1, 3, 1, 0, 0])
>>> data = array([1, 1, 1, 1, 1, 1, 1])
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc()
>>> A.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from .csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
idx_dtype = get_index_dtype((self.col, self.row),
maxval=max(self.nnz, M))
indptr = np.empty(N + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
coo_tocsr(N, M, self.nnz,
self.col.astype(idx_dtype),
self.row.astype(idx_dtype),
self.data,
indptr, indices, data)
A = csc_matrix((data, indices, indptr), shape=self.shape)
A.sum_duplicates()
return A
def tocsr(self):
"""Return a copy of this matrix in Compressed Sparse Row format
Duplicate entries will be summed together.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0, 0, 1, 3, 1, 0, 0])
>>> col = array([0, 2, 1, 3, 1, 0, 0])
>>> data = array([1, 1, 1, 1, 1, 1, 1])
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr()
>>> A.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from .csr import csr_matrix
if self.nnz == 0:
return csr_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
idx_dtype = get_index_dtype((self.row, self.col),
maxval=max(self.nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
coo_tocsr(M, N, self.nnz,
self.row.astype(idx_dtype),
self.col.astype(idx_dtype),
self.data,
indptr,
indices,
data)
A = csr_matrix((data, indices, indptr), shape=self.shape)
A.sum_duplicates()
return A
def tocoo(self, copy=False):
if copy:
return self.copy()
else:
return self
def todia(self):
from .dia import dia_matrix
ks = self.col - self.row # the diagonal for each nonzero
diags = np.unique(ks)
if len(diags) > 100:
#probably undesired, should we do something?
#should todia() have a maxdiags parameter?
pass
#initialize and fill in data array
if self.data.size == 0:
data = np.zeros((0, 0), dtype=self.dtype)
else:
data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
data[np.searchsorted(diags,ks), self.col] = self.data
return dia_matrix((data,diags), shape=self.shape)
def todok(self):
from .dok import dok_matrix
self.sum_duplicates()
dok = dok_matrix((self.shape), dtype=self.dtype)
dok.update(izip(izip(self.row,self.col),self.data))
return dok
def diagonal(self):
# Could be rewritten without the python loop.
# Data entries at the same (row, col) are summed.
n = min(self.shape)
ndata = self.data.shape[0]
d = np.zeros(n, dtype=self.dtype)
for i in xrange(ndata):
r = self.row[i]
if r == self.col[i]:
d[r] += self.data[i]
return d
diagonal.__doc__ = _data_matrix.diagonal.__doc__
def _setdiag(self, values, k):
M, N = self.shape
if values.ndim and not len(values):
return
idx_dtype = self.row.dtype
# Determine which triples to keep and where to put the new ones.
full_keep = self.col - self.row != k
if k < 0:
max_index = min(M+k, N)
if values.ndim:
max_index = min(max_index, len(values))
keep = np.logical_or(full_keep, self.col >= max_index)
new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
new_col = np.arange(max_index, dtype=idx_dtype)
else:
max_index = min(M, N-k)
if values.ndim:
max_index = min(max_index, len(values))
keep = np.logical_or(full_keep, self.row >= max_index)
new_row = np.arange(max_index, dtype=idx_dtype)
new_col = np.arange(k, k + max_index, dtype=idx_dtype)
# Define the array of data consisting of the entries to be added.
if values.ndim:
new_data = values[:max_index]
else:
new_data = np.empty(max_index, dtype=self.dtype)
new_data[:] = values
# Update the internal structure.
self.row = np.concatenate((self.row[keep], new_row))
self.col = np.concatenate((self.col[keep], new_col))
self.data = np.concatenate((self.data[keep], new_data))
self.has_canonical_format = False
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the index arrays
(i.e. .row and .col) are copied.
"""
if copy:
return coo_matrix((data, (self.row.copy(), self.col.copy())),
shape=self.shape, dtype=data.dtype)
else:
return coo_matrix((data, (self.row, self.col)),
shape=self.shape, dtype=data.dtype)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation
"""
if self.has_canonical_format or len(self.data) == 0:
return
order = np.lexsort((self.row,self.col))
self.row = self.row[order]
self.col = self.col[order]
self.data = self.data[order]
unique_mask = ((self.row[1:] != self.row[:-1]) |
(self.col[1:] != self.col[:-1]))
unique_mask = np.append(True, unique_mask)
self.row = self.row[unique_mask]
self.col = self.col[unique_mask]
unique_inds, = np.nonzero(unique_mask)
self.data = np.add.reduceat(self.data, unique_inds, dtype=self.dtype)
self.has_canonical_format = True
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
#output array
result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
other.dtype.char))
coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
return result
def _mul_multivector(self, other):
return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])
def isspmatrix_coo(x):
return isinstance(x, coo_matrix)
| ales-erjavec/scipy | scipy/sparse/coo.py | Python | bsd-3-clause | 17,960 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015-16, Ritesh Khadgaray <khadgaray () gmail.com>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vm_shell
short_description: Run commands in a VMware guest operating system
description:
- Module allows user to run common system administration commands in the guest operating system.
version_added: "2.1"
author:
- Ritesh Khadgaray (@ritzk)
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 5.5, 6.0 and 6.5.
- Only the first match against vm_id is used, even if there are multiple matches.
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter:
description:
- The datacenter hosting the virtual machine.
- If set, it will help to speed up virtual machine search.
type: str
cluster:
description:
- The cluster hosting the virtual machine.
- If set, it will help to speed up virtual machine search.
type: str
folder:
description:
- Destination folder, absolute or relative path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
version_added: "2.4"
type: str
vm_id:
description:
- Name of the virtual machine to work with.
required: True
type: str
vm_id_type:
description:
- The VMware identification method by which the virtual machine will be identified.
default: vm_name
choices: ['uuid', 'instance_uuid', 'dns_name', 'inventory_path', 'vm_name']
type: str
vm_username:
description:
- The user to login-in to the virtual machine.
required: True
type: str
vm_password:
description:
- The password used to login-in to the virtual machine.
required: True
type: str
vm_shell:
description:
- The absolute path to the program to start.
- On Linux, shell is executed via bash.
required: True
type: str
vm_shell_args:
description:
- The argument to the program.
- The characters which must be escaped to the shell also be escaped on the command line provided.
default: " "
type: str
vm_shell_env:
description:
- Comma separated list of environment variable, specified in the guest OS notation.
type: list
vm_shell_cwd:
description:
- The current working directory of the application from which it will be run.
type: str
wait_for_process:
description:
- If set to C(True), module will wait for process to complete in the given virtual machine.
default: False
type: bool
version_added: 2.7
timeout:
description:
- Timeout in seconds.
- If set to positive integers, then C(wait_for_process) will honor this parameter and will exit after this timeout.
default: 3600
version_added: 2.7
type: int
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Run command inside a virtual machine
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: "{{ vm_name }}"
vm_username: root
vm_password: superSecret
vm_shell: /bin/echo
vm_shell_args: " $var >> myFile "
vm_shell_env:
- "PATH=/bin"
- "VAR=test"
vm_shell_cwd: "/tmp"
delegate_to: localhost
register: shell_command_output
- name: Run command inside a virtual machine with wait and timeout
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: NameOfVM
vm_username: root
vm_password: superSecret
vm_shell: /bin/sleep
vm_shell_args: 100
wait_for_process: True
timeout: 2000
delegate_to: localhost
register: shell_command_with_wait_timeout
- name: Change user password in the guest machine
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: "{{ vm_name }}"
vm_username: sample
vm_password: old_password
vm_shell: "/bin/echo"
vm_shell_args: "-e 'old_password\nnew_password\nnew_password' | passwd sample > /tmp/$$.txt 2>&1"
delegate_to: localhost
- name: Change hostname of guest machine
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: "{{ vm_name }}"
vm_username: testUser
vm_password: SuperSecretPassword
vm_shell: "/usr/bin/hostnamectl"
vm_shell_args: "set-hostname new_hostname > /tmp/$$.txt 2>&1"
delegate_to: localhost
'''
RETURN = r'''
results:
description: metadata about the new process after completion with wait_for_process
returned: on success
type: dict
sample:
{
"cmd_line": "\"/bin/sleep\" 1",
"end_time": "2018-04-26T05:03:21+00:00",
"exit_code": 0,
"name": "sleep",
"owner": "dev1",
"start_time": "2018-04-26T05:03:19+00:00",
"uuid": "564db1e2-a3ff-3b0e-8b77-49c25570bb66",
}
'''
import time
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (PyVmomi, find_cluster_by_name,
find_datacenter_by_name, find_vm_by_id,
vmware_argument_spec)
class VMwareShellManager(PyVmomi):
def __init__(self, module):
super(VMwareShellManager, self).__init__(module)
datacenter_name = module.params['datacenter']
cluster_name = module.params['cluster']
folder = module.params['folder']
self.pm = self.content.guestOperationsManager.processManager
self.timeout = self.params.get('timeout', 3600)
self.wait_for_pid = self.params.get('wait_for_process', False)
datacenter = None
if datacenter_name:
datacenter = find_datacenter_by_name(self.content, datacenter_name)
if not datacenter:
module.fail_json(changed=False, msg="Unable to find %(datacenter)s datacenter" % module.params)
cluster = None
if cluster_name:
cluster = find_cluster_by_name(self.content, cluster_name, datacenter)
if not cluster:
module.fail_json(changed=False, msg="Unable to find %(cluster)s cluster" % module.params)
if module.params['vm_id_type'] == 'inventory_path':
vm = find_vm_by_id(self.content,
vm_id=module.params['vm_id'],
vm_id_type="inventory_path",
folder=folder)
else:
vm = find_vm_by_id(self.content,
vm_id=module.params['vm_id'],
vm_id_type=module.params['vm_id_type'],
datacenter=datacenter,
cluster=cluster)
if not vm:
module.fail_json(msg='Unable to find virtual machine.')
tools_status = vm.guest.toolsStatus
if tools_status in ['toolsNotInstalled', 'toolsNotRunning']:
self.module.fail_json(msg="VMwareTools is not installed or is not running in the guest."
" VMware Tools are necessary to run this module.")
try:
self.execute_command(vm, module.params)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(changed=False, msg=to_native(runtime_fault.msg))
except vmodl.MethodFault as method_fault:
module.fail_json(changed=False, msg=to_native(method_fault.msg))
except Exception as e:
module.fail_json(changed=False, msg=to_native(e))
def execute_command(self, vm, params):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
vm_username = params['vm_username']
vm_password = params['vm_password']
program_path = params['vm_shell']
args = params['vm_shell_args']
env = params['vm_shell_env']
cwd = params['vm_shell_cwd']
credentials = vim.vm.guest.NamePasswordAuthentication(username=vm_username,
password=vm_password)
cmd_spec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args,
envVariables=env,
programPath=program_path,
workingDirectory=cwd)
res = self.pm.StartProgramInGuest(vm=vm, auth=credentials, spec=cmd_spec)
if self.wait_for_pid:
res_data = self.wait_for_process(vm, res, credentials)
results = dict(uuid=vm.summary.config.uuid,
owner=res_data.owner,
start_time=res_data.startTime.isoformat(),
end_time=res_data.endTime.isoformat(),
exit_code=res_data.exitCode,
name=res_data.name,
cmd_line=res_data.cmdLine)
if res_data.exitCode != 0:
results['msg'] = "Failed to execute command"
results['changed'] = False
results['failed'] = True
self.module.fail_json(**results)
else:
results['changed'] = True
results['failed'] = False
self.module.exit_json(**results)
else:
self.module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=res)
def process_exists_in_guest(self, vm, pid, creds):
res = self.pm.ListProcessesInGuest(vm, creds, pids=[pid])
if not res:
self.module.fail_json(
changed=False, msg='ListProcessesInGuest: None (unexpected)')
res = res[0]
if res.exitCode is None:
return True, None
else:
return False, res
def wait_for_process(self, vm, pid, creds):
start_time = time.time()
while True:
current_time = time.time()
process_status, res_data = self.process_exists_in_guest(vm, pid, creds)
if not process_status:
return res_data
elif current_time - start_time >= self.timeout:
self.module.fail_json(
msg="Timeout waiting for process to complete.",
vm=vm._moId,
pid=pid,
start_time=start_time,
current_time=current_time,
timeout=self.timeout)
else:
time.sleep(5)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dict(
datacenter=dict(type='str'),
cluster=dict(type='str'),
folder=dict(type='str'),
vm_id=dict(type='str', required=True),
vm_id_type=dict(default='vm_name', type='str',
choices=['inventory_path',
'uuid',
'instance_uuid',
'dns_name',
'vm_name']),
vm_username=dict(type='str', required=True),
vm_password=dict(type='str', no_log=True, required=True),
vm_shell=dict(type='str', required=True),
vm_shell_args=dict(default=" ", type='str'),
vm_shell_env=dict(type='list'),
vm_shell_cwd=dict(type='str'),
wait_for_process=dict(type='bool', default=False),
timeout=dict(type='int', default=3600),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
required_if=[
['vm_id_type', 'inventory_path', ['folder']]
],
)
vm_shell_mgr = VMwareShellManager(module)
if __name__ == '__main__':
main()
| kustodian/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_shell.py | Python | gpl-3.0 | 13,417 |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.vyos import vyos_user
from .vyos_module import TestVyosModule, load_fixture, set_module_args
class TestVyosUserModule(TestVyosModule):
module = vyos_user
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.vyos.vyos_user.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.vyos.vyos_user.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.get_config.return_value = load_fixture('vyos_user_config.cfg')
self.load_config.return_value = dict(diff=None, session='session')
def test_vyos_user_password(self):
set_module_args(dict(name='ansible', configured_password='test'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['set system login user ansible authentication plaintext-password test'])
def test_vyos_user_delete(self):
set_module_args(dict(name='ansible', state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['delete system login user ansible'])
def test_vyos_user_level(self):
set_module_args(dict(name='ansible', level='operator'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['set system login user ansible level operator'])
def test_vyos_user_level_invalid(self):
set_module_args(dict(name='ansible', level='sysadmin'))
self.execute_module(failed=True)
def test_vyos_user_purge(self):
set_module_args(dict(purge=True))
result = self.execute_module(changed=True)
self.assertEqual(sorted(result['commands']), sorted(['delete system login user ansible',
'delete system login user admin']))
def test_vyos_user_update_password_changed(self):
set_module_args(dict(name='test', configured_password='test', update_password='on_create'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['set system login user test authentication plaintext-password test'])
def test_vyos_user_update_password_on_create_ok(self):
set_module_args(dict(name='ansible', configured_password='test', update_password='on_create'))
self.execute_module()
def test_vyos_user_update_password_always(self):
set_module_args(dict(name='ansible', configured_password='test', update_password='always'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['set system login user ansible authentication plaintext-password test'])
| ppanczyk/ansible | test/units/modules/network/vyos/test_vyos_user.py | Python | gpl-3.0 | 3,752 |
import os
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils.translation import activate, get_language, trans_real
from .utils import POFileAssertionMixin
SAMPLEPROJECT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sampleproject')
SAMPLEPROJECT_LOCALE = os.path.join(SAMPLEPROJECT_DIR, 'locale')
@override_settings(LOCALE_PATHS=[SAMPLEPROJECT_LOCALE])
class FrenchTestCase(SimpleTestCase):
"""Tests using the French translations of the sampleproject."""
PO_FILE = os.path.join(SAMPLEPROJECT_LOCALE, 'fr', 'LC_MESSAGES', 'django.po')
def setUp(self):
self._language = get_language()
self._translations = trans_real._translations
activate('fr')
def tearDown(self):
trans_real._translations = self._translations
activate(self._language)
class ExtractingStringsWithPercentSigns(POFileAssertionMixin, FrenchTestCase):
"""
Tests the extracted string found in the gettext catalog.
Percent signs are python formatted.
These tests should all have an analogous translation tests below, ensuring
the Python formatting does not persist through to a rendered template.
"""
def setUp(self):
super().setUp()
with open(self.PO_FILE) as fp:
self.po_contents = fp.read()
def test_trans_tag_with_percent_symbol_at_the_end(self):
self.assertMsgId('Literal with a percent symbol at the end %%', self.po_contents)
def test_trans_tag_with_percent_symbol_in_the_middle(self):
self.assertMsgId('Literal with a percent %% symbol in the middle', self.po_contents)
self.assertMsgId('It is 100%%', self.po_contents)
def test_trans_tag_with_string_that_look_like_fmt_spec(self):
self.assertMsgId('Looks like a str fmt spec %%s but should not be interpreted as such', self.po_contents)
self.assertMsgId('Looks like a str fmt spec %% o but should not be interpreted as such', self.po_contents)
def test_adds_python_format_to_all_percent_signs(self):
self.assertMsgId('1 percent sign %%, 2 percent signs %%%%, 3 percent signs %%%%%%', self.po_contents)
self.assertMsgId('%(name)s says: 1 percent sign %%, 2 percent signs %%%%', self.po_contents)
class RenderingTemplatesWithPercentSigns(FrenchTestCase):
"""
Test rendering of templates that use percent signs.
Ensures both trans and blocktrans tags behave consistently.
Refs #11240, #11966, #24257
"""
def test_translates_with_a_percent_symbol_at_the_end(self):
expected = 'Littérale avec un symbole de pour cent à la fin %'
trans_tpl = Template('{% load i18n %}{% trans "Literal with a percent symbol at the end %" %}')
self.assertEqual(trans_tpl.render(Context({})), expected)
block_tpl = Template(
'{% load i18n %}{% blocktrans %}Literal with a percent symbol at '
'the end %{% endblocktrans %}'
)
self.assertEqual(block_tpl.render(Context({})), expected)
def test_translates_with_percent_symbol_in_the_middle(self):
expected = 'Pour cent littérale % avec un symbole au milieu'
trans_tpl = Template('{% load i18n %}{% trans "Literal with a percent % symbol in the middle" %}')
self.assertEqual(trans_tpl.render(Context({})), expected)
block_tpl = Template(
'{% load i18n %}{% blocktrans %}Literal with a percent % symbol '
'in the middle{% endblocktrans %}'
)
self.assertEqual(block_tpl.render(Context({})), expected)
def test_translates_with_percent_symbol_using_context(self):
trans_tpl = Template('{% load i18n %}{% trans "It is 100%" %}')
self.assertEqual(trans_tpl.render(Context({})), 'Il est de 100%')
trans_tpl = Template('{% load i18n %}{% trans "It is 100%" context "female" %}')
self.assertEqual(trans_tpl.render(Context({})), 'Elle est de 100%')
block_tpl = Template('{% load i18n %}{% blocktrans %}It is 100%{% endblocktrans %}')
self.assertEqual(block_tpl.render(Context({})), 'Il est de 100%')
block_tpl = Template('{% load i18n %}{% blocktrans context "female" %}It is 100%{% endblocktrans %}')
self.assertEqual(block_tpl.render(Context({})), 'Elle est de 100%')
def test_translates_with_string_that_look_like_fmt_spec_with_trans(self):
# tests "%s"
expected = ('On dirait un spec str fmt %s mais ne devrait pas être interprété comme plus disponible')
trans_tpl = Template(
'{% load i18n %}{% trans "Looks like a str fmt spec %s but '
'should not be interpreted as such" %}'
)
self.assertEqual(trans_tpl.render(Context({})), expected)
block_tpl = Template(
'{% load i18n %}{% blocktrans %}Looks like a str fmt spec %s but '
'should not be interpreted as such{% endblocktrans %}'
)
self.assertEqual(block_tpl.render(Context({})), expected)
# tests "% o"
expected = ('On dirait un spec str fmt % o mais ne devrait pas être interprété comme plus disponible')
trans_tpl = Template(
'{% load i18n %}{% trans "Looks like a str fmt spec % o but should not be '
'interpreted as such" %}'
)
self.assertEqual(trans_tpl.render(Context({})), expected)
block_tpl = Template(
'{% load i18n %}{% blocktrans %}Looks like a str fmt spec % o but should not be '
'interpreted as such{% endblocktrans %}'
)
self.assertEqual(block_tpl.render(Context({})), expected)
def test_translates_multiple_percent_signs(self):
expected = ('1 % signe pour cent, signes %% 2 pour cent, trois signes de pourcentage %%%')
trans_tpl = Template(
'{% load i18n %}{% trans "1 percent sign %, 2 percent signs %%, '
'3 percent signs %%%" %}'
)
self.assertEqual(trans_tpl.render(Context({})), expected)
block_tpl = Template(
'{% load i18n %}{% blocktrans %}1 percent sign %, 2 percent signs '
'%%, 3 percent signs %%%{% endblocktrans %}'
)
self.assertEqual(block_tpl.render(Context({})), expected)
block_tpl = Template(
'{% load i18n %}{% blocktrans %}{{name}} says: 1 percent sign %, '
'2 percent signs %%{% endblocktrans %}'
)
self.assertEqual(
block_tpl.render(Context({"name": "Django"})),
'Django dit: 1 pour cent signe %, deux signes de pourcentage %%'
)
| georgemarshall/django | tests/i18n/test_percents.py | Python | bsd-3-clause | 6,626 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from saharaclient import client as sahara_client
from openstack_dashboard.test import helpers
from openstack_dashboard.contrib.sahara import api
class SaharaAPITestCase(helpers.APITestCase):
def setUp(self):
super(SaharaAPITestCase, self).setUp()
self._original_saharaclient = api.sahara.client
api.sahara.client = lambda request: self.stub_saharaclient()
def tearDown(self):
super(SaharaAPITestCase, self).tearDown()
api.sahara.client = self._original_saharaclient
def stub_saharaclient(self):
if not hasattr(self, "saharaclient"):
self.mox.StubOutWithMock(sahara_client, 'Client')
self.saharaclient = self.mox.CreateMock(sahara_client.Client)
return self.saharaclient
| FNST-OpenStack/horizon | openstack_dashboard/contrib/sahara/test/helpers.py | Python | apache-2.0 | 1,346 |
# -*- coding: utf-8 -*-
""" Sahana Eden Staff Module Automated Tests
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class Staff(SeleniumUnitTest):
def test_staff001_create_staff(self):
"""
@case: asset001
@description: Create a Staff Member - IN PROGRESS
* RENE: Insert instructions
"""
print "\n"
| devinbalkind/eden | modules/tests/staff/staff.py | Python | mit | 1,563 |
#
# The Python Imaging Library.
# $Id$
#
# base class for image file handlers
#
# history:
# 1995-09-09 fl Created
# 1996-03-11 fl Fixed load mechanism.
# 1996-04-15 fl Added pcx/xbm decoders.
# 1996-04-30 fl Added encoders.
# 1996-12-14 fl Added load helpers
# 1997-01-11 fl Use encode_to_file where possible
# 1997-08-27 fl Flush output in _save
# 1998-03-05 fl Use memory mapping for some modes
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
# 1999-05-31 fl Added image parser
# 2000-10-12 fl Set readonly flag on memory-mapped images
# 2002-03-20 fl Use better messages for common decoder errors
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
# 2003-10-30 fl Added StubImageFile class
# 2004-02-25 fl Made incremental parser more robust
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1995-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
from PIL._util import isPath
import io
import os
import sys
import traceback
MAXBLOCK = 65536
SAFEBLOCK = 1024*1024
LOAD_TRUNCATED_IMAGES = False
ERRORS = {
-1: "image buffer overrun error",
-2: "decoding error",
-3: "unknown error",
-8: "bad configuration",
-9: "out of memory error"
}
def raise_ioerror(error):
try:
message = Image.core.getcodecstatus(error)
except AttributeError:
message = ERRORS.get(error)
if not message:
message = "decoder error %d" % error
raise IOError(message + " when reading image file")
#
# --------------------------------------------------------------------
# Helpers
def _tilesort(t):
# sort on offset
return t[2]
#
# --------------------------------------------------------------------
# ImageFile base class
class ImageFile(Image.Image):
"Base class for image file format handlers."
def __init__(self, fp=None, filename=None):
Image.Image.__init__(self)
self.tile = None
self.readonly = 1 # until we know better
self.decoderconfig = ()
self.decodermaxblock = MAXBLOCK
if isPath(fp):
# filename
self.fp = open(fp, "rb")
self.filename = fp
else:
# stream
self.fp = fp
self.filename = filename
try:
self._open()
except IndexError as v: # end of data
if Image.DEBUG > 1:
traceback.print_exc()
raise SyntaxError(v)
except TypeError as v: # end of data (ord)
if Image.DEBUG > 1:
traceback.print_exc()
raise SyntaxError(v)
except KeyError as v: # unsupported mode
if Image.DEBUG > 1:
traceback.print_exc()
raise SyntaxError(v)
except EOFError as v: # got header but not the first frame
if Image.DEBUG > 1:
traceback.print_exc()
raise SyntaxError(v)
if not self.mode or self.size[0] <= 0:
raise SyntaxError("not identified by this driver")
def draft(self, mode, size):
"Set draft mode"
pass
def verify(self):
"Check file integrity"
# raise exception if something's wrong. must be called
# directly after open, and closes file when finished.
self.fp = None
def load(self):
"Load image data based on tile list"
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.map = None
use_mmap = self.filename and len(self.tile) == 1
# As of pypy 2.1.0, memory mapping was failing here.
use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info')
readonly = 0
# look for read/seek overrides
try:
read = self.load_read
# don't use mmap if there are custom read/seek functions
use_mmap = False
except AttributeError:
read = self.fp.read
try:
seek = self.load_seek
use_mmap = False
except AttributeError:
seek = self.fp.seek
if use_mmap:
# try memory mapping
d, e, o, a = self.tile[0]
if d == "raw" and a[0] == self.mode and a[0] in Image._MAPMODES:
try:
if hasattr(Image.core, "map"):
# use built-in mapper
self.map = Image.core.map(self.filename)
self.map.seek(o)
self.im = self.map.readimage(
self.mode, self.size, a[1], a[2]
)
else:
# use mmap, if possible
import mmap
fp = open(self.filename, "r+")
size = os.path.getsize(self.filename)
# FIXME: on Unix, use PROT_READ etc
self.map = mmap.mmap(fp.fileno(), size)
self.im = Image.core.map_buffer(
self.map, self.size, d, e, o, a
)
readonly = 1
except (AttributeError, EnvironmentError, ImportError):
self.map = None
self.load_prepare()
if not self.map:
# sort tiles in file order
self.tile.sort(key=_tilesort)
try:
# FIXME: This is a hack to handle TIFF's JpegTables tag.
prefix = self.tile_prefix
except AttributeError:
prefix = b""
# Buffer length read; assign a default value
t = 0
for d, e, o, a in self.tile:
d = Image._getdecoder(self.mode, d, a, self.decoderconfig)
seek(o)
try:
d.setimage(self.im, e)
except ValueError:
continue
b = prefix
t = len(b)
while True:
try:
s = read(self.decodermaxblock)
except IndexError as ie: # truncated png/gif
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IndexError(ie)
if not s and not d.handles_eof: # truncated jpeg
self.tile = []
# JpegDecode needs to clean things up here either way
# If we don't destroy the decompressor,
# we have a memory leak.
d.cleanup()
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError("image file is truncated "
"(%d bytes not processed)" % len(b))
b = b + s
n, e = d.decode(b)
if n < 0:
break
b = b[n:]
t = t + n
# Need to cleanup here to prevent leaks in PyPy
d.cleanup()
self.tile = []
self.readonly = readonly
self.fp = None # might be shared
if not self.map and (not LOAD_TRUNCATED_IMAGES or t == 0) and e < 0:
# still raised if decoder fails to return anything
raise_ioerror(e)
# post processing
if hasattr(self, "tile_post_rotate"):
# FIXME: This is a hack to handle rotated PCD's
self.im = self.im.rotate(self.tile_post_rotate)
self.size = self.im.size
self.load_end()
return Image.Image.load(self)
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.new(self.mode, self.size)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def load_end(self):
# may be overridden
pass
# may be defined for contained formats
# def load_seek(self, pos):
# pass
# may be defined for blocked formats (e.g. PNG)
# def load_read(self, bytes):
# pass
class StubImageFile(ImageFile):
"""
Base class for stub image loaders.
A stub loader is an image loader that can identify files of a
certain format, but relies on external code to load the file.
"""
def _open(self):
raise NotImplementedError(
"StubImageFile subclass must implement _open"
)
def load(self):
loader = self._load()
if loader is None:
raise IOError("cannot find loader for this %s file" % self.format)
image = loader.load(self)
assert image is not None
# become the other object (!)
self.__class__ = image.__class__
self.__dict__ = image.__dict__
def _load(self):
"(Hook) Find actual image loader."
raise NotImplementedError(
"StubImageFile subclass must implement _load"
)
class Parser(object):
"""
Incremental image parser. This class implements the standard
feed/close consumer interface.
In Python 2.x, this is an old-style class.
"""
incremental = None
image = None
data = None
decoder = None
offset = 0
finished = 0
def reset(self):
"""
(Consumer) Reset the parser. Note that you can only call this
method immediately after you've created a parser; parser
instances cannot be reused.
"""
assert self.data is None, "cannot reuse parsers"
def feed(self, data):
"""
(Consumer) Feed data to the parser.
:param data: A string buffer.
:exception IOError: If the parser failed to parse the image file.
"""
# collect data
if self.finished:
return
if self.data is None:
self.data = data
else:
self.data = self.data + data
# parse what we have
if self.decoder:
if self.offset > 0:
# skip header
skip = min(len(self.data), self.offset)
self.data = self.data[skip:]
self.offset = self.offset - skip
if self.offset > 0 or not self.data:
return
n, e = self.decoder.decode(self.data)
if n < 0:
# end of stream
self.data = None
self.finished = 1
if e < 0:
# decoding error
self.image = None
raise_ioerror(e)
else:
# end of image
return
self.data = self.data[n:]
elif self.image:
# if we end up here with no decoder, this file cannot
# be incrementally parsed. wait until we've gotten all
# available data
pass
else:
# attempt to open this file
try:
try:
fp = io.BytesIO(self.data)
im = Image.open(fp)
finally:
fp.close() # explicitly close the virtual file
except IOError:
# traceback.print_exc()
pass # not enough data
else:
flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
if flag or len(im.tile) != 1:
# custom load code, or multiple tiles
self.decode = None
else:
# initialize decoder
im.load_prepare()
d, e, o, a = im.tile[0]
im.tile = []
self.decoder = Image._getdecoder(
im.mode, d, a, im.decoderconfig
)
self.decoder.setimage(im.im, e)
# calculate decoder offset
self.offset = o
if self.offset <= len(self.data):
self.data = self.data[self.offset:]
self.offset = 0
self.image = im
def close(self):
"""
(Consumer) Close the stream.
:returns: An image object.
:exception IOError: If the parser failed to parse the image file either
because it cannot be identified or cannot be
decoded.
"""
# finish decoding
if self.decoder:
# get rid of what's left in the buffers
self.feed(b"")
self.data = self.decoder = None
if not self.finished:
raise IOError("image was incomplete")
if not self.image:
raise IOError("cannot parse this image")
if self.data:
# incremental parsing not possible; reopen the file
# not that we have all data
try:
fp = io.BytesIO(self.data)
self.image = Image.open(fp)
finally:
self.image.load()
fp.close() # explicitly close the virtual file
return self.image
# --------------------------------------------------------------------
def _save(im, fp, tile, bufsize=0):
"""Helper to save image based on tile list
:param im: Image object.
:param fp: File object.
:param tile: Tile list.
:param bufsize: Optional buffer size
"""
im.load()
if not hasattr(im, "encoderconfig"):
im.encoderconfig = ()
tile.sort(key=_tilesort)
# FIXME: make MAXBLOCK a configuration parameter
# It would be great if we could have the encoder specify what it needs
# But, it would need at least the image size in most cases. RawEncode is
# a tricky case.
bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
try:
fh = fp.fileno()
fp.flush()
except (AttributeError, io.UnsupportedOperation):
# compress to Python file-compatible object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
while True:
l, s, d = e.encode(bufsize)
fp.write(d)
if s:
break
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
else:
# slight speedup: compress to real file object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
s = e.encode_to_file(fh, bufsize)
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
try:
fp.flush()
except:
pass
def _safe_read(fp, size):
"""
Reads large blocks in a safe way. Unlike fp.read(n), this function
doesn't trust the user. If the requested size is larger than
SAFEBLOCK, the file is read block by block.
:param fp: File handle. Must implement a <b>read</b> method.
:param size: Number of bytes to read.
:returns: A string containing up to <i>size</i> bytes of data.
"""
if size <= 0:
return b""
if size <= SAFEBLOCK:
return fp.read(size)
data = []
while size > 0:
block = fp.read(min(size, SAFEBLOCK))
if not block:
break
data.append(block)
size -= len(block)
return b"".join(data)
| whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/PIL/ImageFile.py | Python | mit | 16,120 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_sshd
short_description: Manage the SSHD settings of a BIG-IP
description:
- Manage the SSHD settings of a BIG-IP.
version_added: "2.2"
options:
allow:
description:
- Specifies, if you have enabled SSH access, the IP address or address
range for other systems that can use SSH to communicate with this
system.
choices:
- all
- IP address, such as 172.27.1.10
- IP range, such as 172.27.*.* or 172.27.0.0/255.255.0.0
banner:
description:
- Whether to enable the banner or not.
choices:
- enabled
- disabled
banner_text:
description:
- Specifies the text to include on the pre-login banner that displays
when a user attempts to login to the system using SSH.
inactivity_timeout:
description:
- Specifies the number of seconds before inactivity causes an SSH
session to log out.
log_level:
description:
- Specifies the minimum SSHD message level to include in the system log.
choices:
- debug
- debug1
- debug2
- debug3
- error
- fatal
- info
- quiet
- verbose
login:
description:
- Specifies, when checked C(enabled), that the system accepts SSH
communications.
choices:
- enabled
- disabled
port:
description:
- Port that you want the SSH daemon to run on.
notes:
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Set the banner for the SSHD service from a string
bigip_device_sshd:
banner: enabled
banner_text: banner text goes here
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set the banner for the SSHD service from a file
bigip_device_sshd:
banner: enabled
banner_text: "{{ lookup('file', '/path/to/file') }}"
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set the SSHD service to run on port 2222
bigip_device_sshd:
password: secret
port: 2222
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
allow:
description: >
Specifies, if you have enabled SSH access, the IP address or address
range for other systems that can use SSH to communicate with this
system.
returned: changed
type: string
sample: 192.0.2.*
banner:
description: Whether the banner is enabled or not.
returned: changed
type: string
sample: true
banner_text:
description: >
Specifies the text included on the pre-login banner that
displays when a user attempts to login to the system using SSH.
returned: changed and success
type: string
sample: This is a corporate device. Connecting to it without...
inactivity_timeout:
description: >
The number of seconds before inactivity causes an SSH
session to log out.
returned: changed
type: int
sample: 10
log_level:
description: The minimum SSHD message level to include in the system log.
returned: changed
type: string
sample: debug
login:
description: Specifies that the system accepts SSH communications or not.
returned: changed
type: bool
sample: true
port:
description: Port that you want the SSH daemon to run on.
returned: changed
type: int
sample: 22
'''
from ansible.module_utils.basic import AnsibleModule
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'bannerText': 'banner_text',
'inactivityTimeout': 'inactivity_timeout',
'logLevel': 'log_level'
}
api_attributes = [
'allow', 'banner', 'bannerText', 'inactivityTimeout', 'logLevel',
'login', 'port'
]
updatables = [
'allow', 'banner', 'banner_text', 'inactivity_timeout', 'log_level',
'login', 'port'
]
returnables = [
'allow', 'banner', 'banner_text', 'inactivity_timeout', 'log_level',
'login', 'port'
]
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
@property
def inactivity_timeout(self):
if self._values['inactivity_timeout'] is None:
return None
return int(self._values['inactivity_timeout'])
@property
def port(self):
if self._values['port'] is None:
return None
return int(self._values['port'])
@property
def allow(self):
if self._values['allow'] is None:
return None
allow = self._values['allow']
result = list(set([str(x) for x in allow]))
result = sorted(result)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Parameters()
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(params=changed)
return True
return False
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def read_current_from_device(self):
resource = self.client.api.tm.sys.sshd.load()
result = resource.attrs
return Parameters(params=result)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.sys.sshd.load()
resource.update(**params)
class ArgumentSpec(object):
def __init__(self):
self.choices = ['enabled', 'disabled']
self.levels = [
'debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal', 'info',
'quiet', 'verbose'
]
self.supports_check_mode = True
argument_spec = dict(
allow=dict(
type='list'
),
banner=dict(
choices=self.choices
),
banner_text=dict(),
inactivity_timeout=dict(
type='int'
),
log_level=dict(
choices=self.levels
),
login=dict(
choices=self.choices
),
port=dict(
type='int'
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| ravibhure/ansible | lib/ansible/modules/network/f5/bigip_device_sshd.py | Python | gpl-3.0 | 9,949 |
#!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_webhook
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of Webhook Avi RESTful Object
description:
- This module is used to configure Webhook object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
callback_url:
description:
- Callback url for the webhook.
- Field introduced in 17.1.1.
description:
description:
- Field introduced in 17.1.1.
name:
description:
- The name of the webhook profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the webhook profile.
- Field introduced in 17.1.1.
verification_token:
description:
- Verification token sent back with the callback asquery parameters.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Webhook object
avi_webhook:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_webhook
"""
RETURN = '''
obj:
description: Webhook (api/webhook) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
callback_url=dict(type='str',),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
verification_token=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'webhook',
set([]))
if __name__ == '__main__':
main()
| alxgu/ansible | lib/ansible/modules/network/avi/avi_webhook.py | Python | gpl-3.0 | 3,916 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
test_records = [
[{
"doctype": "UOM",
"uom_name": "_Test UOM",
"must_be_whole_number": 1
}],
[{
"doctype": "UOM",
"uom_name": "_Test UOM 1"
}]
] | saurabh6790/test-med-app | setup/doctype/uom/test_uom.py | Python | agpl-3.0 | 289 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This test intermixes learning and inference calls. It checks that inserting
random inference calls have no effect on learning.
TODO: implement an SP Diff routine. That should be fun!
"""
import cPickle as pickle
import numpy as np
import random
import time
import unittest2 as unittest
from nupic.bindings.math import GetNTAReal
from nupic.research.spatial_pooler import SpatialPooler
from nupic.research.fdrutilities import spDiff
realDType = GetNTAReal()
class SPLearnInferenceTest(unittest.TestCase):
"""Test to check that inference calls do not affect learning."""
def _runLearnInference(self,
n=30,
w=15,
columnDimensions=2048,
numActiveColumnsPerInhArea=40,
spSeed=1951,
spVerbosity=0,
numTrainingRecords=100,
seed=42):
# Instantiate two identical spatial pooler. One will be used only for
# learning. The other will be trained with identical records, but with
# random inference calls thrown in
spLearnOnly = SpatialPooler(
columnDimensions=(columnDimensions, 1),
inputDimensions=(1, n),
potentialRadius=n/2,
numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
spVerbosity=spVerbosity,
seed=spSeed,
synPermInactiveDec=0.01,
synPermActiveInc=0.2,
synPermConnected=0.11,)
spLearnInfer = SpatialPooler(
columnDimensions=(columnDimensions, 1),
inputDimensions=(1, n),
potentialRadius=n/2,
numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
spVerbosity=spVerbosity,
seed=spSeed,
synPermInactiveDec=0.01,
synPermActiveInc=0.2,
synPermConnected=0.11,)
random.seed(seed)
np.random.seed(seed)
# Build up training set with numTrainingRecords patterns
inputs = [] # holds post-encoded input patterns
for i in xrange(numTrainingRecords):
inputVector = np.zeros(n, dtype=realDType)
inputVector [random.sample(xrange(n), w)] = 1
inputs.append(inputVector)
# Train each SP with identical inputs
startTime = time.time()
random.seed(seed)
np.random.seed(seed)
for i in xrange(numTrainingRecords):
if spVerbosity > 0:
print "Input #%d" % i
# TODO: See https://github.com/numenta/nupic/issues/2072
encodedInput = inputs[i]
decodedOutput = np.zeros(columnDimensions)
spLearnOnly.compute(encodedInput, learn=True, activeArray=decodedOutput)
random.seed(seed)
np.random.seed(seed)
for i in xrange(numTrainingRecords):
if spVerbosity > 0:
print "Input #%d" % i
# TODO: See https://github.com/numenta/nupic/issues/2072
encodedInput = inputs[i]
decodedOutput = np.zeros(columnDimensions)
spLearnInfer.compute(encodedInput, learn=True, activeArray=decodedOutput)
print "\nElapsed time: %.2f seconds\n" % (time.time() - startTime)
# Test that both SP"s are identical by checking learning stats
# A more in depth test would check all the coincidences, duty cycles, etc.
# ala tpDiff
# Edit: spDiff has been written as an in depth tester of the spatial pooler
learnOnlyStats = spLearnOnly.getLearningStats()
learnInferStats = spLearnInfer.getLearningStats()
success = True
# Check that the two spatial poolers are equivalent after the same training.
success = success and spDiff(spLearnInfer, spLearnOnly)
self.assertTrue(success)
# Make sure that the pickled and loaded SPs are equivalent.
spPickle = pickle.dumps(spLearnOnly, protocol=0)
spLearnOnlyLoaded = pickle.loads(spPickle)
success = success and spDiff(spLearnOnly, spLearnOnlyLoaded)
self.assertTrue(success)
for k in learnOnlyStats.keys():
if learnOnlyStats[k] != learnInferStats[k]:
success = False
print "Stat", k, "is different:", learnOnlyStats[k], learnInferStats[k]
self.assertTrue(success)
if success:
print "Test succeeded"
@unittest.skip("Currently fails due to switch from FDRCSpatial2 to SpatialPooler."
"The new SP doesn't have explicit methods to get inference.")
# TODO: See https://github.com/numenta/nupic/issues/2072
def testLearnInference(self):
self._runLearnInference(n=50, w=15)
if __name__ == "__main__":
unittest.main()
| loretoparisi/nupic | tests/unit/nupic/research/sp_learn_inference_test.py | Python | agpl-3.0 | 5,473 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, tools
class HrHolidaysRemainingLeavesUser(models.Model):
_name = "hr.holidays.remaining.leaves.user"
_description = "Total holidays by type"
_auto = False
name = fields.Char('Employee', readonly=True)
no_of_leaves = fields.Integer('Remaining leaves', readonly=True)
user_id = fields.Many2one('res.users', string='User', readonly=True)
leave_type = fields.Char('Leave Type', readonly=True)
def init(self):
tools.drop_view_if_exists(self._cr, 'hr_holidays_remaining_leaves_user')
self._cr.execute("""
CREATE or REPLACE view hr_holidays_remaining_leaves_user as (
SELECT
min(hrs.id) as id,
rr.name as name,
sum(hrs.number_of_days) as no_of_leaves,
rr.user_id as user_id,
hhs.name as leave_type
FROM
hr_holidays as hrs, hr_employee as hre,
resource_resource as rr,hr_holidays_status as hhs
WHERE
hrs.employee_id = hre.id and
hre.resource_id = rr.id and
hhs.id = hrs.holiday_status_id
GROUP BY
rr.name,rr.user_id,hhs.name
)
""")
| chienlieu2017/it_management | odoo/addons/hr_holidays/report/hr_holidays_leaves_report.py | Python | gpl-3.0 | 1,428 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLocalDefaultSettings.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '09/01/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsSettings,
QgsLocalDefaultSettings,
QgsBearingNumericFormat)
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLocalDefaultSettings(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("TestPyQgsWFSProvider.com")
QCoreApplication.setApplicationName("TestPyQgsWFSProvider")
QgsSettings().clear()
start_app()
def testBearingFormat(self):
s = QgsLocalDefaultSettings()
format = QgsBearingNumericFormat()
format.setNumberDecimalPlaces(9)
format.setDirectionFormat(QgsBearingNumericFormat.UseRange0To360)
s.setBearingFormat(format)
self.assertEqual(s.bearingFormat().numberDecimalPlaces(), 9)
self.assertEqual(s.bearingFormat().directionFormat(), QgsBearingNumericFormat.UseRange0To360)
format = QgsBearingNumericFormat()
format.setNumberDecimalPlaces(3)
format.setDirectionFormat(QgsBearingNumericFormat.UseRangeNegative180ToPositive180)
s.setBearingFormat(format)
self.assertEqual(s.bearingFormat().numberDecimalPlaces(), 3)
self.assertEqual(s.bearingFormat().directionFormat(), QgsBearingNumericFormat.UseRangeNegative180ToPositive180)
# new settings object, should persist.
s2 = QgsLocalDefaultSettings()
self.assertEqual(s2.bearingFormat().numberDecimalPlaces(), 3)
self.assertEqual(s2.bearingFormat().directionFormat(), QgsBearingNumericFormat.UseRangeNegative180ToPositive180)
if __name__ == '__main__':
unittest.main()
| pblottiere/QGIS | tests/src/python/test_qgslocaldefaultsettings.py | Python | gpl-2.0 | 2,306 |
#
# formatter.py
#
# Convert parsed content blocks to a structured document (library file).
#
# Copyright 2002-2016 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This is the base Formatter class. Its purpose is to convert a content
# processor's data into specific documents (i.e., table of contents, global
# index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example, the
# file `tohtml.py' contains the definition of the `HtmlFormatter' sub-class
# to output HTML.
#
from sources import *
from content import *
from utils import *
################################################################
##
## FORMATTER CLASS
##
class Formatter:
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
# store all blocks in a dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
self.block_index = self.identifiers.keys()
self.block_index.sort( key = index_key )
# also add section names to dictionary (without making them appear
# in the index)
for section in self.sections:
self.add_identifier( section.name, section )
def add_identifier( self, name, block ):
if name in self.identifiers:
# duplicate name!
sys.stderr.write( "WARNING: duplicate definition for"
+ " '" + name + "' "
+ "in " + block.location() + ", "
+ "previous definition in "
+ self.identifiers[name].location()
+ "\n" )
else:
self.identifiers[name] = block
#
# formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def toc_exit( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
output = None
if toc_filename:
output = open_output( toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit( chap )
self.toc_index( index_filename )
self.toc_exit()
if output:
close_output( output )
#
# formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
close_output( output )
#
# formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
skip_entry = 0
try:
block = self.identifiers[name]
# `block_names' can contain field names also,
# which we filter out
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
if field.name == name:
skip_entry = 1
except:
skip_entry = 1 # this happens e.g. for `/empty/' entries
if skip_entry:
continue
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
# eof
| lophyel/mupdf-for-mac | thirdparty/freetype/src/tools/docmaker/formatter.py | Python | gpl-3.0 | 6,195 |
from django.test import TestCase
from django.conf import settings
import mock
from oscar.apps.basket import forms
from oscar.test import factories
class TestBasketLineForm(TestCase):
def setUp(self):
self.basket = factories.create_basket()
self.line = self.basket.all_lines()[0]
def mock_availability_return_value(self, is_available, reason=''):
policy = self.line.purchase_info.availability
policy.is_purchase_permitted = mock.MagicMock(
return_value=(is_available, reason))
def build_form(self, quantity=None):
if quantity is None:
quantity = self.line.quantity
return forms.BasketLineForm(
strategy=self.basket.strategy,
data={'quantity': quantity},
instance=self.line)
def test_enforces_availability_policy_for_valid_quantities(self):
self.mock_availability_return_value(True)
form = self.build_form()
self.assertTrue(form.is_valid())
def test_enforces_availability_policy_for_invalid_quantities(self):
self.mock_availability_return_value(False, "Some reason")
form = self.build_form()
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['quantity'], ['Some reason'])
def test_skips_availability_policy_for_zero_quantities(self):
self.mock_availability_return_value(True)
form = self.build_form(quantity=0)
self.assertTrue(form.is_valid())
def test_enforces_max_line_quantity(self):
invalid_qty = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD + 1
form = self.build_form(quantity=invalid_qty)
self.assertFalse(form.is_valid())
class TestAddToBasketForm(TestCase):
def test_allows_a_product_quantity_to_be_increased(self):
basket = factories.create_basket()
product = basket.all_lines()[0].product
# Add more of the same product
data = {'quantity': 1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertTrue(form.is_valid())
def test_checks_whether_passed_product_id_matches_a_real_product(self):
basket = factories.create_basket()
product = basket.all_lines()[0].product
# Add more of the same product
data = {'quantity': -1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertFalse(form.is_valid())
def test_checks_if_purchase_is_permitted(self):
basket = factories.BasketFactory()
product = factories.ProductFactory()
# Build a 4-level mock monster so we can force the return value of
# whether the product is available to buy. This is a serious code smell
# and needs to be remedied.
info = mock.Mock()
info.availability = mock.Mock()
info.availability.is_purchase_permitted = mock.Mock(
return_value=(False, "Not on your nelly!"))
basket.strategy.fetch_for_product = mock.Mock(
return_value=info)
data = {'quantity': 1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertFalse(form.is_valid())
self.assertEqual('Not on your nelly!', form.errors['__all__'][0])
def test_mixed_currency_baskets_are_not_permitted(self):
# Ensure basket is one currency
basket = mock.Mock()
basket.currency = 'GBP'
basket.num_items = 1
# Ensure new product has different currency
info = mock.Mock()
info.price.currency = 'EUR'
basket.strategy.fetch_for_product = mock.Mock(
return_value=info)
product = factories.ProductFactory()
data = {'quantity': 1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertFalse(form.is_valid())
| ahmetdaglarbas/e-commerce | tests/integration/basket/form_tests.py | Python | bsd-3-clause | 3,927 |
from django.db.models import CharField, Value
from django.db.models.functions import Left, Lower
from django.test import TestCase
from ..models import Author
class LeftTests(TestCase):
@classmethod
def setUpTestData(cls):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
def test_basic(self):
authors = Author.objects.annotate(name_part=Left('name', 5))
self.assertQuerysetEqual(authors.order_by('name'), ['John ', 'Rhond'], lambda a: a.name_part)
# If alias is null, set it to the first 2 lower characters of the name.
Author.objects.filter(alias__isnull=True).update(alias=Lower(Left('name', 2)))
self.assertQuerysetEqual(authors.order_by('name'), ['smithj', 'rh'], lambda a: a.alias)
def test_invalid_length(self):
with self.assertRaisesMessage(ValueError, "'length' must be greater than 0"):
Author.objects.annotate(raises=Left('name', 0))
def test_expressions(self):
authors = Author.objects.annotate(name_part=Left('name', Value(3), output_field=CharField()))
self.assertQuerysetEqual(authors.order_by('name'), ['Joh', 'Rho'], lambda a: a.name_part)
| nesdis/djongo | tests/django_tests/tests/v22/tests/db_functions/text/test_left.py | Python | agpl-3.0 | 1,219 |
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
notes:
- Facts are placed in the C(openstack.clouds) variable.
options:
clouds:
description:
- List of clouds to limit the return list to. No value means return
information on all configured clouds
required: false
default: []
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Get list of clouds that do not support security groups
- os_client_config:
- debug: var={{ item }}
with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
# Get the information back just about the mordred cloud
- os_client_config:
clouds:
- mordred
'''
def main():
module = AnsibleModule(argument_spec=dict(
clouds=dict(required=False, type='list', default=[]),
))
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = []
for cloud in config.get_all_clouds():
if not p['clouds'] or cloud.name in p['clouds']:
cloud.config['name'] = cloud.name
clouds.append(cloud.config)
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| haad/ansible-modules-core | cloud/openstack/os_client_config.py | Python | gpl-3.0 | 2,339 |
from twisted.internet.protocol import ServerFactory, Protocol, ClientCreator
from twisted.internet.defer import DeferredList, maybeDeferred, Deferred
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.python import log
from zope.interface.verify import verifyClass
class StopStartReadingProtocol(Protocol):
def connectionMade(self):
self.transport.pauseProducing()
self.transport.resumeProducing()
reactor.callLater(0, self._beTerrible)
self.data = ''
def _beTerrible(self):
self.transport.pauseProducing()
self.transport.resumeProducing()
reactor.callLater(0, self._beMoreTerrible)
def _beMoreTerrible(self):
self.transport.pauseProducing()
self.transport.resumeProducing()
reactor.callLater(0, self.factory.ready_d.callback, self)
def dataReceived(self, data):
log.msg('got data', len(data))
self.data += data
if len(self.data) == 4*self.transport.readBufferSize:
self.factory.stop_d.callback(self.data)
class IOCPReactorTestCase(unittest.TestCase):
def test_noPendingTimerEvents(self):
"""
Test reactor behavior (doIteration) when there are no pending time
events.
"""
from twisted.internet.iocpreactor.reactor import IOCPReactor
ir = IOCPReactor()
ir.wakeUp()
self.failIf(ir.doIteration(None))
def test_stopStartReading(self):
"""
This test checks transport read state! There are three bits
of it:
1) The transport producer is paused -- transport.reading
is False)
2) The transport is about to schedule an OS read, on the next
reactor iteration -- transport._readScheduled
3) The OS has a pending asynchronous read on our behalf --
transport._readScheduledInOS
if 3) is not implemented, it is possible to trick IOCPReactor into
scheduling an OS read before the previous one finishes
"""
sf = ServerFactory()
sf.protocol = StopStartReadingProtocol
sf.ready_d = Deferred()
sf.stop_d = Deferred()
p = reactor.listenTCP(0, sf)
port = p.getHost().port
cc = ClientCreator(reactor, Protocol)
def proceed(protos, port):
log.msg('PROCEEDING WITH THE TESTATHRON')
self.assert_(protos[0])
self.assert_(protos[1])
protos = protos[0][1], protos[1][1]
protos[0].transport.write(
'x' * (2 * protos[0].transport.readBufferSize) +
'y' * (2 * protos[0].transport.readBufferSize))
return sf.stop_d.addCallback(cleanup, protos, port)
def cleanup(data, protos, port):
self.assert_(data == 'x'*(2*protos[0].transport.readBufferSize)+
'y'*(2*protos[0].transport.readBufferSize),
'did not get the right data')
return DeferredList([
maybeDeferred(protos[0].transport.loseConnection),
maybeDeferred(protos[1].transport.loseConnection),
maybeDeferred(port.stopListening)])
return (DeferredList([cc.connectTCP('127.0.0.1', port), sf.ready_d])
.addCallback(proceed, p))
def test_reactorInterfaces(self):
"""
Verify that IOCP socket-representing classes implement IReadWriteHandle
"""
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.iocpreactor import tcp, udp
verifyClass(IReadWriteHandle, tcp.Connection)
verifyClass(IReadWriteHandle, udp.Port)
if reactor.__class__.__name__ != 'IOCPReactor':
IOCPReactorTestCase.skip = 'This test only applies to IOCPReactor'
| sorenh/cc | vendor/Twisted-10.0.0/twisted/internet/test/test_iocp.py | Python | apache-2.0 | 3,873 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Extreme Networks Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import re
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.network.common.config import NetworkConfig, ConfigLine
_DEVICE_CONFIGS = {}
DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/', 'echo']
DEFAULT_IGNORE_LINES_RE = set([
re.compile(r"Preparing to Display Configuration\.\.\.")
])
def get_connection(module):
if hasattr(module, '_voss_connection'):
return module._voss_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._voss_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._voss_connection
def get_capabilities(module):
if hasattr(module, '_voss_capabilities'):
return module._voss_capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
module._voss_capabilities = json.loads(capabilities)
return module._voss_capabilities
def get_defaults_flag(module):
connection = get_connection(module)
try:
out = connection.get_defaults_flag()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return to_text(out, errors='surrogate_then_replace').strip()
def get_config(module, source='running', flags=None):
flag_str = ' '.join(to_list(flags))
try:
return _DEVICE_CONFIGS[flag_str]
except KeyError:
connection = get_connection(module)
try:
out = connection.get_config(source=source, flags=flags)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[flag_str] = cfg
return cfg
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
connection = get_connection(module)
try:
out = connection.run_commands(commands=commands, check_rc=check_rc)
return out
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def load_config(module, commands):
connection = get_connection(module)
try:
resp = connection.edit_config(commands)
return resp.get('response')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
sublevel_config = VossNetworkConfig(indent=0)
obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj._children
for c in contents:
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
sublevel_config.add(current_config_contents, module.params['parents'])
return sublevel_config
def ignore_line(text, tokens=None):
for item in (tokens or DEFAULT_COMMENT_TOKENS):
if text.startswith(item):
return True
for regex in DEFAULT_IGNORE_LINES_RE:
if regex.match(text):
return True
def voss_parse(lines, indent=None, comment_tokens=None):
toplevel = re.compile(r'(^interface.*$)|(^router \w+$)|(^router vrf \w+$)')
exitline = re.compile(r'^exit$')
entry_reg = re.compile(r'([{};])')
ancestors = list()
config = list()
dup_parent_index = None
for line in to_native(lines, errors='surrogate_or_strict').split('\n'):
text = entry_reg.sub('', line).strip()
cfg = ConfigLine(text)
if not text or ignore_line(text, comment_tokens):
continue
# Handle top level commands
if toplevel.match(text):
# Looking to see if we have existing parent
for index, item in enumerate(config):
if item.text == text:
# This means we have an existing parent with same label
dup_parent_index = index
break
ancestors = [cfg]
config.append(cfg)
# Handle 'exit' line
elif exitline.match(text):
ancestors = list()
if dup_parent_index is not None:
# We're working with a duplicate parent
# Don't need to store exit, just go to next line in config
dup_parent_index = None
else:
cfg._parents = ancestors[:1]
config.append(cfg)
# Handle sub-level commands. Only have single sub-level
elif ancestors:
cfg._parents = ancestors[:1]
if dup_parent_index is not None:
# Update existing entry, since this already exists in config
config[int(dup_parent_index)].add_child(cfg)
new_index = dup_parent_index + 1
config.insert(new_index, cfg)
else:
ancestors[0].add_child(cfg)
config.append(cfg)
else:
# Global command, no further special handling needed
config.append(cfg)
return config
class VossNetworkConfig(NetworkConfig):
def load(self, s):
self._config_text = s
self._items = voss_parse(s, self._indent)
def _diff_line(self, other):
updates = list()
for item in self.items:
if str(item) == "exit":
if updates and updates[-1]._parents:
updates.append(item)
elif item not in other:
updates.append(item)
return updates
| alxgu/ansible | lib/ansible/module_utils/network/voss/voss.py | Python | gpl-3.0 | 7,726 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Taneli Leppä <[email protected]>
#
# This file is part of Ansible (sort of)
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: gluster_volume
short_description: Manage GlusterFS volumes
description:
- Create, remove, start, stop and tune GlusterFS volumes
version_added: "1.9"
options:
name:
required: true
description:
- The volume name
state:
required: true
choices: [ 'present', 'absent', 'started', 'stopped' ]
description:
- Use present/absent ensure if a volume exists or not,
use started/stopped to control it's availability.
cluster:
required: false
default: null
description:
- List of hosts to use for probing and brick setup
host:
required: false
default: null
description:
- Override local hostname (for peer probing purposes)
replicas:
required: false
default: null
description:
- Replica count for volume
stripes:
required: false
default: null
description:
- Stripe count for volume
transport:
required: false
choices: [ 'tcp', 'rdma', 'tcp,rdma' ]
default: 'tcp'
description:
- Transport type for volume
bricks:
required: false
default: null
description:
- Brick paths on servers. Multiple brick paths can be separated by commas
aliases: ['brick']
start_on_create:
choices: [ 'yes', 'no']
required: false
description:
- Controls whether the volume is started after creation or not, defaults to yes
rebalance:
choices: [ 'yes', 'no']
required: false
default: 'no'
description:
- Controls whether the cluster is rebalanced after changes
directory:
required: false
default: null
description:
- Directory for limit-usage
options:
required: false
default: null
description:
- A dictionary/hash with options/settings for the volume
quota:
required: false
default: null
description:
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list)
force:
required: false
default: null
description:
- If brick is being created in the root partition, module will fail.
Set force to true to override this behaviour
notes:
- "Requires cli tools for GlusterFS on servers"
- "Will add new bricks, but not remove them"
author: "Taneli Leppä (@rosmo)"
"""
EXAMPLES = """
- name: create gluster volume
gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="192.168.1.10,192.168.1.11"
run_once: true
- name: tune
gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}'
- name: start gluster volume
gluster_volume: state=started name=test1
- name: limit usage
gluster_volume: state=present name=test1 directory=/foo quota=20.0MB
- name: stop gluster volume
gluster_volume: state=stopped name=test1
- name: remove gluster volume
gluster_volume: state=absent name=test1
- name: create gluster volume with multiple bricks
gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="192.168.1.10,192.168.1.11"
run_once: true
"""
import shutil
import time
import socket
glusterbin = ''
def run_gluster(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
try:
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception, e:
module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)))
return out
def run_gluster_nofail(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
return None
return out
def run_gluster_yes(gargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, data='y\n')
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
return out
def get_peers():
out = run_gluster([ 'peer', 'status'])
i = 0
peers = {}
hostname = None
uuid = None
state = None
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'hostname':
hostname = value
if key.lower() == 'uuid':
uuid = value
if key.lower() == 'state':
state = value
peers[hostname] = [ uuid, state ]
return peers
def get_volumes():
out = run_gluster([ 'volume', 'info' ])
volumes = {}
volume = {}
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'volume name':
volume['name'] = value
volume['options'] = {}
volume['quota'] = False
if key.lower() == 'volume id':
volume['id'] = value
if key.lower() == 'status':
volume['status'] = value
if key.lower() == 'transport-type':
volume['transport'] = value
if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
if not 'bricks' in volume:
volume['bricks'] = []
volume['bricks'].append(value)
# Volume options
if '.' in key:
if not 'options' in volume:
volume['options'] = {}
volume['options'][key] = value
if key == 'features.quota' and value == 'on':
volume['quota'] = True
else:
if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
if len(volume) > 0:
volumes[volume['name']] = volume
volume = {}
return volumes
def get_quotas(name, nofail):
quotas = {}
if nofail:
out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ])
if not out:
return quotas
else:
out = run_gluster([ 'volume', 'quota', name, 'list' ])
for row in out.split('\n'):
if row[:1] == '/':
q = re.split('\s+', row)
quotas[q[0]] = q[1]
return quotas
def wait_for_peer(host):
for x in range(0, 4):
peers = get_peers()
if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
return True
time.sleep(1)
return False
def probe(host, myhostname):
global module
run_gluster([ 'peer', 'probe', host ])
if not wait_for_peer(host):
module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
changed = True
def probe_all_peers(hosts, peers, myhostname):
for host in hosts:
host = host.strip() # Clean up any extra space for exact comparison
if host not in peers:
# dont probe ourselves
if myhostname != host:
probe(host, myhostname)
def create_volume(name, stripe, replica, transport, hosts, bricks, force):
args = [ 'volume', 'create' ]
args.append(name)
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
args.append('transport')
args.append(transport)
for brick in bricks:
for host in hosts:
args.append(('%s:%s' % (host, brick)))
if force:
args.append('force')
run_gluster(args)
def start_volume(name):
run_gluster([ 'volume', 'start', name ])
def stop_volume(name):
run_gluster_yes([ 'volume', 'stop', name ])
def set_volume_option(name, option, parameter):
run_gluster([ 'volume', 'set', name, option, parameter ])
def add_brick(name, brick, force):
args = [ 'volume', 'add-brick', name, brick ]
if force:
args.append('force')
run_gluster(args)
def do_rebalance(name):
run_gluster([ 'volume', 'rebalance', name, 'start' ])
def enable_quota(name):
run_gluster([ 'volume', 'quota', name, 'enable' ])
def set_quota(name, directory, value):
run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
def main():
### MAIN ###
global module
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, default=None, aliases=['volume']),
state=dict(required=True, choices=[ 'present', 'absent', 'started', 'stopped', 'rebalanced' ]),
cluster=dict(required=False, default=None, type='list'),
host=dict(required=False, default=None),
stripes=dict(required=False, default=None, type='int'),
replicas=dict(required=False, default=None, type='int'),
transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
bricks=dict(required=False, default=None, aliases=['brick']),
start_on_create=dict(required=False, default=True, type='bool'),
rebalance=dict(required=False, default=False, type='bool'),
options=dict(required=False, default={}, type='dict'),
quota=dict(required=False),
directory=dict(required=False, default=None),
force=dict(required=False, default=False, type='bool'),
)
)
global glusterbin
glusterbin = module.get_bin_path('gluster', True)
changed = False
action = module.params['state']
volume_name = module.params['name']
cluster= module.params['cluster']
brick_paths = module.params['bricks']
stripes = module.params['stripes']
replicas = module.params['replicas']
transport = module.params['transport']
myhostname = module.params['host']
start_on_create = module.boolean(module.params['start_on_create'])
rebalance = module.boolean(module.params['rebalance'])
force = module.boolean(module.params['force'])
if not myhostname:
myhostname = socket.gethostname()
# Clean up if last element is empty. Consider that yml can look like this:
# cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
if cluster != None and cluster[-1] == '':
cluster = cluster[0:-1]
if brick_paths != None and "," in brick_paths:
brick_paths = brick_paths.split(",")
else:
brick_paths = [brick_paths]
options = module.params['options']
quota = module.params['quota']
directory = module.params['directory']
# get current state info
peers = get_peers()
volumes = get_volumes()
quotas = {}
if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
quotas = get_quotas(volume_name, True)
# do the work!
if action == 'absent':
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
run_gluster_yes([ 'volume', 'delete', volume_name ])
changed = True
if action == 'present':
probe_all_peers(cluster, peers, myhostname)
# create if it doesn't exist
if volume_name not in volumes:
create_volume(volume_name, stripes, replicas, transport, cluster, brick_paths, force)
volumes = get_volumes()
changed = True
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
start_volume(volume_name)
changed = True
# switch bricks
new_bricks = []
removed_bricks = []
all_bricks = []
for node in cluster:
for brick_path in brick_paths:
brick = '%s:%s' % (node, brick_path)
all_bricks.append(brick)
if brick not in volumes[volume_name]['bricks']:
new_bricks.append(brick)
# this module does not yet remove bricks, but we check those anyways
for brick in volumes[volume_name]['bricks']:
if brick not in all_bricks:
removed_bricks.append(brick)
for brick in new_bricks:
add_brick(volume_name, brick, force)
changed = True
# handle quotas
if quota:
if not volumes[volume_name]['quota']:
enable_quota(volume_name)
quotas = get_quotas(volume_name, False)
if directory not in quotas or quotas[directory] != quota:
set_quota(volume_name, directory, quota)
changed = True
# set options
for option in options.keys():
if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
set_volume_option(volume_name, option, options[option])
changed = True
else:
module.fail_json(msg='failed to create volume %s' % volume_name)
if volume_name not in volumes:
module.fail_json(msg='volume not found %s' % volume_name)
if action == 'started':
if volumes[volume_name]['status'].lower() != 'started':
start_volume(volume_name)
changed = True
if action == 'stopped':
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
changed = True
if changed:
volumes = get_volumes()
if rebalance:
do_rebalance(volume_name)
facts = {}
facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }
module.exit_json(changed=changed, ansible_facts=facts)
# import module snippets
from ansible.module_utils.basic import *
main()
| yfauser/ansible-modules-extras | system/gluster_volume.py | Python | gpl-3.0 | 14,777 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Create a Proxy Class
#
# In this assignment, create a proxy class (one is started for you
# below). You should be able to initialize the proxy object with any
# object. Any attributes called on the proxy object should be forwarded
# to the target object. As each attribute call is sent, the proxy should
# record the name of the attribute sent.
#
# The proxy class is started for you. You will need to add a method
# missing handler and any other supporting methods. The specification
# of the Proxy class is given in the AboutProxyObjectProject koan.
# Note: This is a bit trickier than its Ruby Koans counterpart, but you
# can do it!
from runner.koan import *
class Proxy(object):
def __init__(self, target_object):
# WRITE CODE HERE
#initialize '_obj' attribute last. Trust me on this!
self._obj = target_object
# WRITE CODE HERE
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
def test_proxy_method_returns_wrapped_object(self):
# NOTE: The Television class is defined below
tv = Proxy(Television())
self.assertTrue(isinstance(tv, Proxy))
def test_tv_methods_still_perform_their_function(self):
tv = Proxy(Television())
tv.channel = 10
tv.power()
self.assertEqual(10, tv.channel)
self.assertTrue(tv.is_on())
def test_proxy_records_messages_sent_to_tv(self):
tv = Proxy(Television())
tv.power()
tv.channel = 10
self.assertEqual(['power', 'channel'], tv.messages())
def test_proxy_handles_invalid_messages(self):
tv = Proxy(Television())
ex = None
try:
tv.no_such_method()
except AttributeError as ex:
pass
self.assertEqual(AttributeError, type(ex))
def test_proxy_reports_methods_have_been_called(self):
tv = Proxy(Television())
tv.power()
tv.power()
self.assertTrue(tv.was_called('power'))
self.assertFalse(tv.was_called('channel'))
def test_proxy_counts_method_calls(self):
tv = Proxy(Television())
tv.power()
tv.channel = 48
tv.power()
self.assertEqual(2, tv.number_of_times_called('power'))
self.assertEqual(1, tv.number_of_times_called('channel'))
self.assertEqual(0, tv.number_of_times_called('is_on'))
def test_proxy_can_record_more_than_just_tv_objects(self):
proxy = Proxy("Py Ohio 2010")
result = proxy.upper()
self.assertEqual("PY OHIO 2010", result)
result = proxy.split()
self.assertEqual(["Py", "Ohio", "2010"], result)
self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class using in the proxy testing above.
class Television(object):
def __init__(self):
self._channel = None
self._power = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
def power(self):
if self._power == 'on':
self._power = 'off'
else:
self._power = 'on'
def is_on(self):
return self._power == 'on'
# Tests for the Television class. All of theses tests should pass.
class TelevisionTest(Koan):
def test_it_turns_on(self):
tv = Television()
tv.power()
self.assertTrue(tv.is_on())
def test_it_also_turns_off(self):
tv = Television()
tv.power()
tv.power()
self.assertFalse(tv.is_on())
def test_edge_case_on_off(self):
tv = Television()
tv.power()
tv.power()
tv.power()
self.assertTrue(tv.is_on())
tv.power()
self.assertFalse(tv.is_on())
def test_can_set_the_channel(self):
tv = Television()
tv.channel = 11
self.assertEqual(11, tv.channel)
| Krakn/learning | src/python/python_koans/python2/about_proxy_object_project.py | Python | isc | 4,194 |
#!/usr/bin/env python
def main():
return 0
if __name__ == '__main__': main()
var = 'hi'
var2 = 'hi' # blah = blah
| R1dO/geany | tests/ctags/test.py | Python | gpl-2.0 | 120 |
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The tests for the exception classes.
TODO:
- This is undertested. HTML is never checked
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import exceptions
from werkzeug.wrappers import Response
from werkzeug._compat import text_type
class ExceptionsTestCase(WerkzeugTestCase):
def test_proxy_exception(self):
orig_resp = Response('Hello World')
try:
exceptions.abort(orig_resp)
except exceptions.HTTPException as e:
resp = e.get_response({})
else:
self.fail('exception not raised')
self.assert_true(resp is orig_resp)
self.assert_equal(resp.get_data(), b'Hello World')
def test_aborter(self):
abort = exceptions.abort
self.assert_raises(exceptions.BadRequest, abort, 400)
self.assert_raises(exceptions.Unauthorized, abort, 401)
self.assert_raises(exceptions.Forbidden, abort, 403)
self.assert_raises(exceptions.NotFound, abort, 404)
self.assert_raises(exceptions.MethodNotAllowed, abort, 405, ['GET', 'HEAD'])
self.assert_raises(exceptions.NotAcceptable, abort, 406)
self.assert_raises(exceptions.RequestTimeout, abort, 408)
self.assert_raises(exceptions.Gone, abort, 410)
self.assert_raises(exceptions.LengthRequired, abort, 411)
self.assert_raises(exceptions.PreconditionFailed, abort, 412)
self.assert_raises(exceptions.RequestEntityTooLarge, abort, 413)
self.assert_raises(exceptions.RequestURITooLarge, abort, 414)
self.assert_raises(exceptions.UnsupportedMediaType, abort, 415)
self.assert_raises(exceptions.UnprocessableEntity, abort, 422)
self.assert_raises(exceptions.InternalServerError, abort, 500)
self.assert_raises(exceptions.NotImplemented, abort, 501)
self.assert_raises(exceptions.BadGateway, abort, 502)
self.assert_raises(exceptions.ServiceUnavailable, abort, 503)
myabort = exceptions.Aborter({1: exceptions.NotFound})
self.assert_raises(LookupError, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
myabort = exceptions.Aborter(extra={1: exceptions.NotFound})
self.assert_raises(exceptions.NotFound, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
def test_exception_repr(self):
exc = exceptions.NotFound()
self.assert_equal(text_type(exc), '404: Not Found')
self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
exc = exceptions.NotFound('Not There')
self.assert_equal(text_type(exc), '404: Not Found')
self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
def test_special_exceptions(self):
exc = exceptions.MethodNotAllowed(['GET', 'HEAD', 'POST'])
h = dict(exc.get_headers({}))
self.assert_equal(h['Allow'], 'GET, HEAD, POST')
self.assert_true('The method is not allowed' in exc.get_description())
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExceptionsTestCase))
return suite
| l8orre/nxtBridge | werkzeug/testsuite/exceptions.py | Python | mit | 3,325 |
import sys
from django.contrib.auth.models import Group
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import UNKNOWN_SOURCE
from django.test import SimpleTestCase, override_settings
from django.urls import NoReverseMatch
from django.utils import translation
class TemplateTests(SimpleTestCase):
def test_string_origin(self):
template = Engine().from_string('string template')
self.assertEqual(template.origin.name, UNKNOWN_SOURCE)
self.assertIsNone(template.origin.loader_name)
self.assertEqual(template.source, 'string template')
@override_settings(SETTINGS_MODULE=None)
def test_url_reverse_no_settings_module(self):
"""
#9005 -- url tag shouldn't require settings.SETTINGS_MODULE to
be set.
"""
t = Engine(debug=True).from_string('{% url will_not_match %}')
c = Context()
with self.assertRaises(NoReverseMatch):
t.render(c)
def test_url_reverse_view_name(self):
"""
#19827 -- url tag should keep original strack trace when reraising
exception.
"""
t = Engine().from_string('{% url will_not_match %}')
c = Context()
try:
t.render(c)
except NoReverseMatch:
tb = sys.exc_info()[2]
depth = 0
while tb.tb_next is not None:
tb = tb.tb_next
depth += 1
self.assertGreater(depth, 5, "The traceback context was lost when reraising the traceback.")
def test_no_wrapped_exception(self):
"""
# 16770 -- The template system doesn't wrap exceptions, but annotates
them.
"""
engine = Engine(debug=True)
c = Context({"coconuts": lambda: 42 / 0})
t = engine.from_string("{{ coconuts }}")
with self.assertRaises(ZeroDivisionError) as e:
t.render(c)
debug = e.exception.template_debug
self.assertEqual(debug['start'], 0)
self.assertEqual(debug['end'], 14)
def test_invalid_block_suggestion(self):
"""
Error messages should include the unexpected block name and be in all
English.
"""
engine = Engine()
msg = (
"Invalid block tag on line 1: 'endblock', expected 'elif', 'else' "
"or 'endif'. Did you forget to register or load this tag?"
)
with self.settings(USE_I18N=True), translation.override('de'):
with self.assertRaisesMessage(TemplateSyntaxError, msg):
engine.from_string("{% if 1 %}lala{% endblock %}{% endif %}")
def test_unknown_block_tag(self):
engine = Engine()
msg = (
"Invalid block tag on line 1: 'foobar'. Did you forget to "
"register or load this tag?"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
engine.from_string("lala{% foobar %}")
def test_compile_filter_expression_error(self):
"""
19819 -- Make sure the correct token is highlighted for
FilterExpression errors.
"""
engine = Engine(debug=True)
msg = "Could not parse the remainder: '@bar' from 'foo@bar'"
with self.assertRaisesMessage(TemplateSyntaxError, msg) as e:
engine.from_string("{% if 1 %}{{ foo@bar }}{% endif %}")
debug = e.exception.template_debug
self.assertEqual((debug['start'], debug['end']), (10, 23))
self.assertEqual((debug['during']), '{{ foo@bar }}')
def test_compile_tag_error(self):
"""
Errors raised while compiling nodes should include the token
information.
"""
engine = Engine(
debug=True,
libraries={'bad_tag': 'template_tests.templatetags.bad_tag'},
)
with self.assertRaises(RuntimeError) as e:
engine.from_string("{% load bad_tag %}{% badtag %}")
self.assertEqual(e.exception.template_debug['during'], '{% badtag %}')
def test_compile_tag_error_27584(self):
engine = Engine(
app_dirs=True,
debug=True,
libraries={'tag_27584': 'template_tests.templatetags.tag_27584'},
)
t = engine.get_template('27584_parent.html')
with self.assertRaises(TemplateSyntaxError) as e:
t.render(Context())
self.assertEqual(e.exception.template_debug['during'], '{% badtag %}')
def test_compile_tag_error_27956(self):
"""Errors in a child of {% extends %} are displayed correctly."""
engine = Engine(
app_dirs=True,
debug=True,
libraries={'tag_27584': 'template_tests.templatetags.tag_27584'},
)
t = engine.get_template('27956_child.html')
with self.assertRaises(TemplateSyntaxError) as e:
t.render(Context())
self.assertEqual(e.exception.template_debug['during'], '{% badtag %}')
def test_super_errors(self):
"""
#18169 -- NoReverseMatch should not be silence in block.super.
"""
engine = Engine(app_dirs=True)
t = engine.get_template('included_content.html')
with self.assertRaises(NoReverseMatch):
t.render(Context())
def test_debug_tag_non_ascii(self):
"""
#23060 -- Test non-ASCII model representation in debug output.
"""
group = Group(name="清風")
c1 = Context({"objs": [group]})
t1 = Engine().from_string('{% debug %}')
self.assertIn("清風", t1.render(c1))
def test_extends_generic_template(self):
"""
#24338 -- Allow extending django.template.backends.django.Template
objects.
"""
engine = Engine()
parent = engine.from_string('{% block content %}parent{% endblock %}')
child = engine.from_string(
'{% extends parent %}{% block content %}child{% endblock %}')
self.assertEqual(child.render(Context({'parent': parent})), 'child')
def test_node_origin(self):
"""
#25848 -- Set origin on Node so debugging tools can determine which
template the node came from even if extending or including templates.
"""
template = Engine().from_string('content')
for node in template.nodelist:
self.assertEqual(node.origin, template.origin)
| edmorley/django | tests/template_tests/tests.py | Python | bsd-3-clause | 6,410 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("migrations", "0001_initial"),
]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.Author", models.SET_NULL, null=True)),
],
)
]
| edmorley/django | tests/migrations/test_migrations_no_changes/0002_second.py | Python | bsd-3-clause | 609 |
""" Utility functions related to HTTP requests """
import re
from django.conf import settings
from microsite_configuration import microsite
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
COURSE_REGEX = re.compile(r'^.*?/courses/{}'.format(settings.COURSE_ID_PATTERN))
def safe_get_host(request):
"""
Get the host name for this request, as safely as possible.
If ALLOWED_HOSTS is properly set, this calls request.get_host;
otherwise, this returns whatever settings.SITE_NAME is set to.
This ensures we will never accept an untrusted value of get_host()
"""
if isinstance(settings.ALLOWED_HOSTS, (list, tuple)) and '*' not in settings.ALLOWED_HOSTS:
return request.get_host()
else:
return microsite.get_value('site_domain', settings.SITE_NAME)
def course_id_from_url(url):
"""
Extracts the course_id from the given `url`.
"""
if not url:
return None
match = COURSE_REGEX.match(url)
if match is None:
return None
course_id = match.group('course_id')
if course_id is None:
return None
try:
return SlashSeparatedCourseKey.from_deprecated_string(course_id)
except InvalidKeyError:
return None
| Semi-global/edx-platform | common/djangoapps/util/request.py | Python | agpl-3.0 | 1,285 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sparkonda documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import sparkonda
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sparkonda'
copyright = u'2015-2016, Moussa Taifi'
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = sparkonda.__version__
# The full version, including alpha/beta/rc tags.
release = sparkonda.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sparkondadoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'sparkonda.tex',
u'Sparkonda Documentation',
u'Moussa Taifi', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sparkonda',
u'Sparkonda Documentation',
[u'Moussa Taifi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sparkonda',
u'Sparkonda Documentation',
u'Moussa Taifi',
'sparkonda',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| moutai/sparkonda | docs/conf.py | Python | isc | 8,343 |
import json, urllib, requests
from flask import Flask, session, redirect, request, url_for
app = Flask(__name__)
app.secret_key = 'insecureappsecretkey'
app.debug = True
# NOTE - these endpoints SHOULD be using HTTPS (for communication with the CAS server)
CAS_ADDR = 'http://localhost:3000/'
SERVICE_URL = 'http://localhost:3001/validateCASLogin'
CAS_LOGIN_ADDR = "".join([CAS_ADDR, "/login?service=", urllib.quote_plus(SERVICE_URL)])
CAS_CHECK_ADDR_TEMPLATE = "".join([CAS_ADDR, "/validate?", "service=", SERVICE_URL, "&ticket=%s"])
@app.route('/', methods=['GET'])
def index():
if 'userEmail' in session and 'userAttributes' in session:
return "<h2>Welcome user with email %s!</h2> <p> Attributes: <code>%s</code> </p>" % (session['userEmail'], session['userAttributes'])
else:
return "Hello %s! <br/> <a href=\"/login\">Login?</a>" % (session['userEmail'] if 'userEmail' in session else 'stranger')
@app.route('/login', methods=['GET'])
def login():
return redirect(CAS_LOGIN_ADDR)
@app.route('/logout', methods=['GET'])
def logout():
if 'userEmail' in session and 'userAttributes' in session:
del session['userEmail']
del session['userAttributes']
return 'Successfully logged out'
@app.route('/validateCASLogin', methods=['GET'])
def cas_validate():
ticket = request.args['ticket']
# Lookup ticket with CAS Server
lookup_addr = CAS_CHECK_ADDR_TEMPLATE % ticket
print("Lookup addr %s" % lookup_addr)
cas_resp = requests.get(lookup_addr).json()
print("Resp:", cas_resp)
# Error handling
if cas_resp['status'] == 'error':
return "Oh No! An error ocurred:<br/> <strong>%s</strong>" % cas_resp['message']
else:
session['userEmail'] = cas_resp['userEmail']
session['userAttributes'] = cas_resp['userAttributes']
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(port=3001)
| t3hmrman/casgo | reference_apps/python/reference_app.py | Python | isc | 1,929 |
print("\n###############################################")
print("##### 5.5.0 - Verbose Regular Expressions #####")
print("###############################################\n")
print("import re")
import re
print()
print("\
pattern = '''\n\
^ # beginning of string\n\
M{0,3} # thousands - 0 to 3 Ms\n\
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 Cs),\n\
# or 500-800 (D, followed by 0 to 3 Cs)\n\
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 Xs),\n\
# or 50-80 (L, followed by 0 to 3 Xs)\n\
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 Is),\n\
# or 5-8 (V, followed by 0 to 3 Is)\n\
$ # end of string\n\
'''\
")
pattern = '''
^ # beginning of string
M{0,3} # thousands - 0 to 3 Ms
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 Cs),
# or 500-800 (D, followed by 0 to 3 Cs)
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 Xs),
# or 50-80 (L, followed by 0 to 3 Xs)
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 Is),
# or 5-8 (V, followed by 0 to 3 Is)
$ # end of string
'''
print()
print("re.search(pattern, 'M', re.VERBOSE)")
print(" = ", re.search(pattern, 'M', re.VERBOSE))
print()
print("re.search(pattern, 'MCMLXXXIX', re.VERBOSE)")
print(" = ", re.search(pattern, 'MCMLXXXIX', re.VERBOSE))
print()
print("re.search(pattern, 'MMMDCCCLXXXVIII', re.VERBOSE)")
print(" = ", re.search(pattern, 'MMMDCCCLXXXVIII', re.VERBOSE))
print()
print("re.search(pattern, 'M')")
print(" = ", re.search(pattern, 'M'))
print()
| Krakn/learning | src/python/dive_input_python_3/chapters/ch05/ch05sc05.py | Python | isc | 1,857 |
'''
Created on Jul 22, 2011
@author: Rio
'''
from mclevelbase import *
import tempfile
from collections import defaultdict
import materials
log = logging.getLogger(__name__)
warn, error, info, debug = log.warn, log.error, log.info, log.debug
def computeChunkHeightMap(materials, blocks, HeightMap = None):
"""Computes the HeightMap array for a chunk, which stores the lowest
y-coordinate of each column where the sunlight is still at full strength.
The HeightMap array is indexed z,x contrary to the blocks array which is x,z,y.
If HeightMap is passed, fills it with the result and returns it. Otherwise, returns a
new array.
"""
lightAbsorption = materials.lightAbsorption[blocks]
heights = extractHeights(lightAbsorption)
heights = heights.swapaxes(0, 1)
if HeightMap is None:
return heights.astype('uint8')
else:
HeightMap[:] = heights
return HeightMap
def extractHeights(array):
""" Given an array of bytes shaped (x, z, y), return the coordinates of the highest
non-zero value in each y-column into heightMap
"""
#The fastest way I've found to do this is to make a boolean array with >0,
# then turn it upside down with ::-1 and use argmax to get the _first_ nonzero
# from each column.
w, h = array.shape[:2]
heightMap = zeros((w, h), 'uint16')
heights = argmax((array>0)[..., ::-1], 2)
heights = array.shape[2] - heights
#if the entire column is air, argmax finds the first air block and the result is a top height column
#top height columns won't ever have air in the top block so we can find air columns by checking for both
heights[(array[..., -1]==0) & (heights == array.shape[2])] = 0
heightMap[:] = heights
return heightMap
def getSlices(box, height):
""" call this method to iterate through a large slice of the world by
visiting each chunk and indexing its data with a subslice.
this returns an iterator, which yields 3-tuples containing:
+ a pair of chunk coordinates (cx,cz),
+ a x,z,y triplet of slices that can be used to index the InfdevChunk's data arrays,
+ a x,y,z triplet representing the relative location of this subslice within the requested world slice.
Note the different order of the coordinates between the 'slices' triplet
and the 'offset' triplet. x,z,y ordering is used only
to index arrays, since it reflects the order of the blocks in memory.
In all other places, including an entity's 'Pos', the order is x,y,z.
"""
#when yielding slices of chunks on the edge of the box, adjust the
#slices by an offset
minxoff, minzoff = box.minx - (box.mincx << 4), box.minz - (box.mincz << 4)
maxxoff, maxzoff = box.maxx - (box.maxcx << 4) + 16, box.maxz - (box.maxcz << 4) + 16
newMinY = 0
if box.miny < 0:
newMinY = -box.miny
miny = max(0, box.miny)
maxy = min(height, box.maxy)
for cx in range(box.mincx, box.maxcx):
localMinX = 0
localMaxX = 16
if cx == box.mincx:
localMinX = minxoff
if cx == box.maxcx - 1:
localMaxX = maxxoff
newMinX = localMinX + (cx << 4) - box.minx
newMaxX = localMaxX + (cx << 4) - box.minx
for cz in range(box.mincz, box.maxcz):
localMinZ = 0
localMaxZ = 16
if cz == box.mincz:
localMinZ = minzoff
if cz == box.maxcz - 1:
localMaxZ = maxzoff
newMinZ = localMinZ + (cz << 4) - box.minz
newMaxZ = localMaxZ + (cz << 4) - box.minz
slices, point = (
(slice(localMinX, localMaxX), slice(localMinZ, localMaxZ), slice(miny, maxy)),
(newMinX, newMinY, newMinZ)
)
yield (cx,cz), slices, point
class MCLevel(object):
""" MCLevel is an abstract class providing many routines to the different level types,
including a common copyEntitiesFrom built on class-specific routines, and
a dummy getChunk/allChunks for the finite levels.
MCLevel also provides compress and decompress methods that are used to load
NBT format levels, and expects subclasses to override shapeChunkData to
assign a shape to the Blocks and other arrays. The resulting arrays after
reshape must be indexed [x,z,y]
MCLevel subclasses must have Width, Length, and Height attributes. The first two are always zero for infinite levels.
Subclasses must also have Blocks, and optionally Data and BlockLight.
"""
###common to Creative, Survival and Indev. these routines assume
###self has Width, Height, Length, and Blocks
materials = classicMaterials
isInfinite = False
compressedTag = None
root_tag = None
Height = None
Length = None
Width = None
players = ["Player"]
dimNo = 0
parentWorld = None
world = None
@classmethod
def isLevel(cls, filename):
"""Tries to find out whether the given filename can be loaded
by this class. Returns True or False.
Subclasses should implement _isLevel, _isDataLevel, or _isTagLevel.
"""
if hasattr(cls, "_isLevel"):
return cls._isLevel(filename)
with file(filename) as f:
data = f.read()
if hasattr(cls, "_isDataLevel"):
return cls._isDataLevel(data)
if hasattr(cls, "_isTagLevel"):
try:
root_tag = nbt.load(filename, data)
except:
return False
return cls._isTagLevel(root_tag)
return False
def getWorldBounds(self):
return BoundingBox((0, 0, 0), self.size)
@property
def displayName(self):
return os.path.basename(self.filename)
@property
def size(self):
"Returns the level's dimensions as a tuple (X,Y,Z)"
return self.Width, self.Height, self.Length
@property
def bounds(self):
return BoundingBox((0, 0, 0), self.size)
def close(self): pass
# --- Compression ---
def compress(self): pass
def decompress(self):pass
# --- Entity Methods ---
def addEntity(self, entityTag): pass
def addEntities(self, entities): pass
def tileEntityAt(self, x, y, z): return None
def addTileEntity(self, entityTag): pass
def getEntitiesInBox(self, box): return []
def getTileEntitiesInBox(self, box): return []
def copyEntitiesFromIter(self, *args, **kw): yield;
def removeEntitiesInBox(self, box): pass
def removeTileEntitiesInBox(self, box): pass
# --- Chunked Format Emulation ---
def compressChunk(self, cx, cz): pass
@property
def loadedChunks(self):
return itertools.product(xrange(0, self.Width + 15 >> 4), xrange(0, self.Length + 15 >> 4))
@property
def chunkCount(self):
return (self.Width + 15 >> 4) * (self.Length + 15 >> 4)
@property
def allChunks(self):
"""Returns a synthetic list of chunk positions (xPos, zPos), to fake
being a chunked level format."""
return self.loadedChunks
def getChunks(self, chunks=None):
""" pass a list of chunk coordinate tuples to get an iterator yielding
InfdevChunks. pass nothing for an iterator of every chunk in the level.
the chunks are automatically loaded."""
if chunks is None: chunks = self.allChunks;
return (self.getChunk(cx, cz) for (cx, cz) in chunks if self.containsChunk(cx, cz))
def _getFakeChunkEntities(self, cx, cz):
"""Returns Entities, TileEntities"""
return [], []
def getChunk(self, cx, cz):
"""Synthesize a FakeChunk object representing the chunk at the given
position. Subclasses override fakeBlocksForChunk and fakeDataForChunk
to fill in the chunk arrays"""
f = FakeChunk()
f.world = self
f.chunkPosition = (cx, cz)
f.Blocks = self.fakeBlocksForChunk(cx, cz)
f.Data = self.fakeDataForChunk(cx, cz)
whiteLight = zeros_like(f.Blocks)
whiteLight[:] = 15
f.BlockLight = whiteLight
f.SkyLight = whiteLight
f.Entities, f.TileEntities = self._getFakeChunkEntities(cx, cz)
f.root_tag = TAG_Compound()
return f
def getAllChunkSlices(self):
slices = (slice(None), slice(None), slice(None),)
box = self.bounds
x, y, z = box.origin
for cpos in self.allChunks:
xPos, zPos = cpos
try:
chunk = self.getChunk(xPos, zPos)
except (ChunkMalformed, ChunkNotPresent):
continue
yield (chunk, slices, (xPos * 16 - x, 0, zPos * 16 - z))
def _getSlices(self, box):
if box == self.bounds:
info("All chunks selected! Selecting %s chunks instead of %s", self.chunkCount, box.chunkCount)
y = box.miny
slices = slice(0, 16), slice(0, 16), slice(0, box.maxy)
def getAllSlices():
for cPos in self.allChunks:
x, z = cPos
x *= 16
z *= 16
x -= box.minx
z -= box.minz
yield cPos, slices, (x, y, z)
return getAllSlices()
else:
return getSlices(box, self.Height)
def getChunkSlices(self, box):
return ((self.getChunk(*cPos), slices, point)
for cPos, slices, point in self._getSlices(box)
if self.containsChunk(*cPos))
def containsPoint(self, x, y, z):
return (x >= 0 and x < self.Width and
y >= 0 and y < self.Height and
z >= 0 and z < self.Length)
def containsChunk(self, cx, cz):
#w+15 to allow non 16 aligned schematics
return (cx >= 0 and cx < (self.Width + 15 >> 4) and
cz >= 0 and cz < (self.Length + 15 >> 4))
def chunkIsLoaded(self, cx, cz):
return self.containsChunk(cx, cz)
def chunkIsCompressed(self, cx, cz):
return False
def chunkIsDirty(self, cx, cz):
return True
def fakeBlocksForChunk(self, cx, cz):
#return a 16x16xH block array for rendering. Alpha levels can
#just return the chunk data. other levels need to reorder the
#indices and return a slice of the blocks.
cxOff = cx << 4
czOff = cz << 4
b = self.Blocks[cxOff:cxOff + 16, czOff:czOff + 16, 0:self.Height, ]
#(w, l, h) = b.shape
#if w<16 or l<16:
# b = resize(b, (16,16,h) )
return b
def fakeDataForChunk(self, cx, cz):
#Data is emulated for flexibility
cxOff = cx << 4
czOff = cz << 4
if hasattr(self, "Data"):
return self.Data[cxOff:cxOff + 16, czOff:czOff + 16, 0:self.Height, ]
else:
return zeros(shape=(16, 16, self.Height), dtype='uint8')
# --- Block accessors ---
def skylightAt(self, *args):
return 15
def setSkylightAt(self, *args): pass
def setBlockDataAt(self, x, y, z, newdata): pass
def blockDataAt(self, x, y, z): return 0;
def blockLightAt(self, x, y, z): return 15;
def blockAt(self, x, y, z):
if x < 0 or y < 0 or z < 0: return 0
if x >= self.Width or y >= self.Height or z >= self.Length: return 0;
return self.Blocks[x, z, y]
def setBlockAt(self, x, y, z, blockID):
if x < 0 or y < 0 or z < 0: return 0
if x >= self.Width or y >= self.Height or z >= self.Length: return 0;
self.Blocks[x, z, y] = blockID
# --- Fill and Replace ---
def blockReplaceTable(self, blocksToReplace):
blocktable = zeros((256, 16), dtype='bool')
for b in blocksToReplace:
if b.hasVariants:
blocktable[b.ID, b.blockData] = True
else:
blocktable[b.ID] = True
return blocktable
def fillBlocksIter(self, box, blockInfo, blocksToReplace=[]):
self.fillBlocks(box, blockInfo, blocksToReplace)
yield
def fillBlocks(self, box, blockInfo, blocksToReplace=[]):
if box is None:
box = self.bounds
else:
box = box.intersect(self.bounds)
info(u"Filling blocks in {0} with {1}, replacing{2}".format(box, blockInfo, blocksToReplace))
slices = map(slice, box.origin, box.maximum)
blocks = self.Blocks[slices[0], slices[2], slices[1]]
if len(blocksToReplace):
blocktable = self.blockReplaceTable(blocksToReplace)
shouldRetainData = (self.materials == alphaMaterials) and all([blockrotation.SameRotationType(blockInfo, b) for b in blocksToReplace])
if hasattr(self, "Data") and shouldRetainData:
data = self.Data[slices[0], slices[2], slices[1]]
mask = blocktable[blocks, data]
data[mask] = blockInfo.blockData
else:
mask = blocktable[blocks, 0]
blocks[mask] = blockInfo.ID
else:
blocks[:] = blockInfo.ID
if hasattr(self, "Data"):
self.Data[slices[0], slices[2], slices[1]] = blockInfo.blockData
# --- Transformations ---
def rotateLeft(self):
self.Blocks = swapaxes(self.Blocks, 1, 0)[:, ::-1, :] #x=z; z=-x
pass;
def roll(self):
self.Blocks = swapaxes(self.Blocks, 2, 0)[:, :, ::-1] #x=y; y=-x
pass
def flipVertical(self):
self.Blocks = self.Blocks[:, :, ::-1] #y=-y
pass
def flipNorthSouth(self):
self.Blocks = self.Blocks[::-1, :, :] #x=-x
pass
def flipEastWest(self):
self.Blocks = self.Blocks[:, ::-1, :] #z=-z
pass
# --- Copying ---
def copyBlocksFromFiniteToFinite(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy):
# assume destinationPoint is entirely within this level, and the size of sourceBox fits entirely within it.
sourcex, sourcey, sourcez = map(slice, sourceBox.origin, sourceBox.maximum)
destCorner2 = map(lambda a, b:a + b, sourceBox.size, destinationPoint)
destx, desty, destz = map(slice, destinationPoint, destCorner2)
sourceData = None
if hasattr(sourceLevel, 'Data'):
sourceData = sourceLevel.Data[sourcex, sourcez, sourcey]
convertedSourceBlocks, convertedSourceData = self.convertBlocksFromLevel(sourceLevel, sourceLevel.Blocks[sourcex, sourcez, sourcey], sourceData)
blocks = self.Blocks[destx, destz, desty]
mask = slice(None, None)
if not (blocksToCopy is None):
typemask = zeros(256, dtype='bool')
typemask[blocksToCopy] = True
mask = typemask[convertedSourceBlocks]
blocks[mask] = convertedSourceBlocks[mask]
if hasattr(self, 'Data') and hasattr(sourceLevel, 'Data'):
data = self.Data[destx, destz, desty]
data[mask] = convertedSourceData[mask]
def copyBlocksFromInfinite(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy):
return exhaust(self.copyBlocksFromInfinite(sourceLevel, sourceBox, destinationPoint, blocksToCopy))
def copyBlocksFromInfiniteIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy):
if blocksToCopy is not None:
typemask = zeros(256, dtype='bool')
typemask[blocksToCopy] = True
for i, (chunk, slices, point) in enumerate(sourceLevel.getChunkSlices(sourceBox)):
point = map(lambda a, b:a + b, point, destinationPoint)
point = point[0], point[2], point[1]
mask = slice(None, None)
convertedSourceBlocks, convertedSourceData = self.convertBlocksFromLevel(sourceLevel, chunk.Blocks[slices], chunk.Data[slices])
destSlices = [slice(p, p + s.stop - s.start) for p, s in zip(point, slices) ]
blocks = self.Blocks[ destSlices ]
if blocksToCopy is not None:
mask = typemask[convertedSourceBlocks]
blocks[mask] = convertedSourceBlocks[mask]
if hasattr(self, 'Data'):
data = self.Data[ destSlices ]
data[mask] = convertedSourceData[mask]
yield i
def adjustCopyParameters(self, sourceLevel, sourceBox, destinationPoint):
# if the destination box is outside the level, it and the source corners are moved inward to fit.
# ValueError is raised if the source corners are outside sourceLevel
(x, y, z) = map(int, destinationPoint)
sourceBox = BoundingBox(sourceBox.origin, sourceBox.size)
(lx, ly, lz) = sourceBox.size
debug(u"Asked to copy {0} blocks \n\tfrom {1} in {3}\n\tto {2} in {4}" .format (ly * lz * lx, sourceBox, destinationPoint, sourceLevel, self))
#clip the source ranges to this level's edges. move the destination point as needed.
#xxx abstract this
if y < 0:
sourceBox.origin[1] -= y
sourceBox.size[1] += y
y = 0
if y + sourceBox.size[1] > self.Height:
sourceBox.size[1] -= y + sourceBox.size[1] - self.Height
y = self.Height - sourceBox.size[1]
#for infinite levels, don't clip along those dimensions because the
#infinite copy func will just skip missing chunks
if self.Width != 0:
if x < 0:
sourceBox.origin[0] -= x
sourceBox.size[0] += x
x = 0
if x + sourceBox.size[0] > self.Width:
sourceBox.size[0] -= x + sourceBox.size[0] - self.Width
#x=self.Width-sourceBox.size[0]
if self.Length != 0:
if z < 0:
sourceBox.origin[2] -= z
sourceBox.size[2] += z
z = 0
if z + sourceBox.size[2] > self.Length:
sourceBox.size[2] -= z + sourceBox.size[2] - self.Length
#z=self.Length-sourceBox.size[2]
destinationPoint = (x, y, z)
return sourceBox, destinationPoint
def copyBlocksFrom(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy=None, entities=True, create=False):
return exhaust(self.copyBlocksFromIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy, entities, create))
def copyBlocksFromIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy=None, entities=True, create=False):
if (not sourceLevel.isInfinite) and not(
sourceLevel.containsPoint(*sourceBox.origin) and
sourceLevel.containsPoint(*map(lambda x:x - 1, sourceBox.maximum))):
raise ValueError, "{0} cannot provide blocks between {1}".format(sourceLevel, sourceBox)
sourceBox, destinationPoint = self.adjustCopyParameters(sourceLevel, sourceBox, destinationPoint)
yield
if min(sourceBox.size) <= 0:
print "Empty source box, aborting"
return
info(u"Copying {0} blocks from {1} to {2}" .format (sourceBox.volume, sourceBox, destinationPoint))
if not sourceLevel.isInfinite:
self.copyBlocksFromFiniteToFinite(sourceLevel, sourceBox, destinationPoint, blocksToCopy)
else:
for i in self.copyBlocksFromInfiniteIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy):
yield i
for i in self.copyEntitiesFromIter(sourceLevel, sourceBox, destinationPoint, entities):
yield i
def convertBlocksFromLevel(self, sourceLevel, blocks, blockData):
return materials.convertBlocks(self.materials, sourceLevel.materials, blocks, blockData)
def saveInPlace(self):
self.saveToFile(self.filename)
# --- Player Methods ---
def setPlayerPosition(self, pos, player="Player"):
pass;
def getPlayerPosition(self, player="Player"):
return 8, self.Height * 0.75, 8
def getPlayerDimension(self, player="Player"): return 0;
def setPlayerDimension(self, d, player="Player"): return;
def setPlayerSpawnPosition(self, pos, player=None):
pass;
def playerSpawnPosition(self, player=None):
return self.getPlayerPosition()
def setPlayerOrientation(self, yp, player="Player"):
pass
def getPlayerOrientation(self, player="Player"):
return -45., 0.
# --- Dummy Lighting Methods ---
def generateLights(self, dirtyChunks=None):
pass;
def generateLightsIter(self, dirtyChunks=None):
yield 0
class EntityLevel(MCLevel):
"""Abstract subclass of MCLevel that adds default entity behavior"""
def copyEntitiesFromInfiniteIter(self, sourceLevel, sourceBox, destinationPoint, entities):
chunkCount = sourceBox.chunkCount
i = 0
copyOffset = map(lambda x, y:x - y, destinationPoint, sourceBox.origin)
e = t = 0
for (chunk, slices, point) in sourceLevel.getChunkSlices(sourceBox):
yield (i, chunkCount)
i += 1
if entities:
e += len(chunk.Entities)
for entityTag in chunk.Entities:
x, y, z = Entity.pos(entityTag)
if (x, y, z) not in sourceBox: continue
eTag = Entity.copyWithOffset(entityTag, copyOffset)
self.addEntity(eTag)
t += len(chunk.TileEntities)
for tileEntityTag in chunk.TileEntities:
x, y, z = TileEntity.pos(tileEntityTag)
if (x, y, z) not in sourceBox: continue
eTag = TileEntity.copyWithOffset(tileEntityTag, copyOffset)
self.addTileEntity(eTag)
info("Copied {0} entities, {1} tile entities".format(e, t))
def copyEntitiesFromIter(self, sourceLevel, sourceBox, destinationPoint, entities=True):
#assume coords have already been adjusted by copyBlocks
#if not self.hasEntities or not sourceLevel.hasEntities: return;
sourcePoint0 = sourceBox.origin
sourcePoint1 = sourceBox.maximum
if sourceLevel.isInfinite:
for i in self.copyEntitiesFromInfiniteIter(sourceLevel, sourceBox, destinationPoint, entities):
yield i
else:
entsCopied = 0
tileEntsCopied = 0
copyOffset = map(lambda x, y:x - y, destinationPoint, sourcePoint0)
if entities:
for entity in sourceLevel.getEntitiesInBox(sourceBox):
eTag = Entity.copyWithOffset(entity, copyOffset)
self.addEntity(eTag)
entsCopied += 1
i = 0
for entity in sourceLevel.getTileEntitiesInBox(sourceBox):
i += 1
if i % 100 == 0:
yield
if not 'x' in entity: continue
eTag = TileEntity.copyWithOffset(entity, copyOffset)
try:
self.addTileEntity(eTag)
tileEntsCopied += 1
except ChunkNotPresent:
pass
yield
info(u"Copied {0} entities, {1} tile entities".format(entsCopied, tileEntsCopied))
def getEntitiesInBox(self, box):
"""Returns a list of references to entities in this chunk, whose positions are within box"""
return [ent for ent in self.Entities if Entity.pos(ent) in box]
def getTileEntitiesInBox(self, box):
"""Returns a list of references to tile entities in this chunk, whose positions are within box"""
return [ent for ent in self.TileEntities if TileEntity.pos(ent) in box]
def removeEntitiesInBox(self, box):
newEnts = []
for ent in self.Entities:
if Entity.pos(ent) in box:
continue
newEnts.append(ent)
entsRemoved = len(self.Entities) - len(newEnts)
debug("Removed {0} entities".format(entsRemoved))
self.Entities.value[:] = newEnts
return entsRemoved
def removeTileEntitiesInBox(self, box):
if not hasattr(self, "TileEntities"): return;
newEnts = []
for ent in self.TileEntities:
if TileEntity.pos(ent) in box:
continue
newEnts.append(ent)
entsRemoved = len(self.TileEntities) - len(newEnts)
debug("Removed {0} tile entities".format(entsRemoved))
self.TileEntities.value[:] = newEnts
return entsRemoved
def addEntities(self, entities):
for e in entities:
self.addEntity(e)
def addEntity(self, entityTag):
assert isinstance(entityTag, TAG_Compound)
self.Entities.append(entityTag)
self._fakeEntities = None
def tileEntityAt(self, x, y, z):
entities = []
for entityTag in self.TileEntities:
if TileEntity.pos(entityTag) == [x, y, z]:
entities.append(entityTag)
if len(entities) > 1:
info("Multiple tile entities found: {0}".format(entities))
if len(entities) == 0:
return None
return entities[0]
def addTileEntity(self, tileEntityTag):
assert isinstance(tileEntityTag, TAG_Compound)
def differentPosition(a):
return not ((tileEntityTag is a) or TileEntity.pos(a) == TileEntity.pos(tileEntityTag))
self.TileEntities.value[:] = filter(differentPosition, self.TileEntities)
self.TileEntities.append(tileEntityTag)
self._fakeEntities = None
_fakeEntities = None
def _getFakeChunkEntities(self, cx, cz):
"""distribute entities into sublists based on fake chunk position
_fakeEntities keys are (cx,cz) and values are (Entities, TileEntities)"""
if self._fakeEntities is None:
self._fakeEntities = defaultdict(lambda: ([], []))
for i, e in enumerate((self.Entities, self.TileEntities)):
for ent in e:
x, y, z = [Entity, TileEntity][i].pos(ent)
ecx, ecz = map(lambda x:(int(floor(x)) >> 4), (x, z))
self._fakeEntities[ecx, ecz][i].append(ent)
return self._fakeEntities[cx, cz]
class ChunkBase(EntityLevel):
dirty = False
needsLighting = False
Blocks = Data = SkyLight = BlockLight = HeightMap = NotImplemented #override these!
def load(self):pass
def compress(self):pass
def chunkChanged(self, needsLighting = True):
self.dirty = True
self.needsLighting = needsLighting or self.needsLighting
@property
def materials(self): return self.world.materials
class FakeChunk(ChunkBase):
@property
def HeightMap(self):
if hasattr(self, "_heightMap"):
return self._heightMap
self._heightMap = computeChunkHeightMap(self.materials, self.Blocks)
return self._heightMap
class LightedChunk(ChunkBase):
def isLoaded(self): return True
def generateHeightMap(self):
computeChunkHeightMap(self.materials, self.Blocks, self.HeightMap)
def chunkChanged(self, calcLighting=True):
""" You are required to call this function after you are done modifying
the chunk. Pass False for calcLighting if you know your changes will
not change any lights."""
if not self.isLoaded(): return;
self.dirty = True
self.needsLighting = calcLighting or self.needsLighting
self.generateHeightMap()
if calcLighting:
self.genFastLights()
def genFastLights(self):
self.SkyLight[:] = 0
if self.world.dimNo in (-1, 1):
return #no light in nether or the end
blocks = self.Blocks
la = self.world.materials.lightAbsorption
skylight = self.SkyLight
heightmap = self.HeightMap
for x, z in itertools.product(xrange(16), xrange(16)):
skylight[x, z, heightmap[z, x]:] = 15
lv = 15
for y in reversed(range(heightmap[z, x])):
lv -= (la[blocks[x, z, y]] or 1)
if lv <= 0:
break
skylight[x, z, y] = lv
| codewarrior0/pymclevel | level.py | Python | isc | 28,376 |
# -*- coding: utf-8 -*-
import logging
from transitions import Machine, logger
from .states import LogInState, SelectionState, ExitState
logger.setLevel(logging.INFO)
logging.basicConfig(filename='./testmanager.log', level=logging.DEBUG)
class Test_Manager(Machine):
def __init__(self):
self.login = LogInState(name='LogInState')
self.select = SelectionState(name='SelectionState')
self.exit = ExitState(name='ExitState')
self.states = ['initial',
self.login,
self.select,
self.exit]
Machine.__init__(self,
states=self.states,
initial='initial')
def poll(self):
self.next_state = self.states[self.state].do_work()
getattr(self, 'to_{}'.format(self.next_state))()
def start_manager(self):
getattr(self, 'to_LogInState')()
while True:
# print('iter is: ' + str(i) + " -model state is:" + tm.state)
self.poll()
if self.state == 'ExitState':
break
| trottmpq/test_manager | test_manager/test_manager.py | Python | mit | 1,111 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import re
from setuptools import find_packages, setup
def read(*parts):
"""
Read file content and return it as string.
"""
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
"""
Find package version from file.
"""
version_file = read(*file_paths)
version_match = re.search(r"""^__version__\s*=\s*(['"])(.+?)\1""",
version_file, re.M)
if version_match:
return version_match.group(2)
raise RuntimeError("Unable to find version string.")
setup(
name='tarantool-deque',
version=find_version('src', 'tarantool_deque', '__init__.py'),
license='MIT',
description='Python bindings for Tarantool delayed queue script',
long_description=read('README.rst'),
author='Vladimir Rudnyh',
author_email='[email protected]',
url='https://github.com/dreadatour/tarantool-deque-python',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=[
'tarantool>0.4'
],
tests_require=[
'tarantool>0.4'
],
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Database :: Front-Ends',
'Environment :: Console'
]
)
| dreadatour/tarantool-deque-python | setup.py | Python | mit | 1,654 |
import requests
from io import BytesIO
from urllib.parse import urlparse
from django.conf import settings
from django.core.files.storage import DefaultStorage
from django.http import Http404
from django.http import HttpResponse
from django.views.decorators.http import require_safe
def process_options(options):
parameters = {}
options = options or ''
for pair in options.split(';'):
key, value = pair.split(',', maxsplit=1)
parameters[key] = value
return parameters
@require_safe
def image_view(request, path=None, options=None):
if not path:
raise Http404('No path provided')
# Grab the default storage, to build the URL
storage = DefaultStorage()
# Optionaly check if the file exists in the storage
# Depending on your storage class, this might not be implemented or performs something outrageous like loading
# the entire file into memory
if getattr(settings, 'RSZIO_CHECK_EXISTS', False) and not storage.exists(path):
raise Http404('Image not found in storage')
# Get the full URL for the image
original_url = storage.url(path)
# Use urllip to pull out the host and path
parsed_url = urlparse(original_url)
# Build the URL
url = 'https://rsz.io/{host}{path}'.format(
host=parsed_url.hostname,
path=parsed_url.path,
)
# Build the rsz.io parameters
try:
parameters = process_options(options)
except:
# KISS: if invalid parameters are passed, raise a 404
raise Http404('Invalid rsz.io options')
# Grab the image
rszio_response = requests.get(url, parameters)
# Return
buffer_image = BytesIO(rszio_response.content)
buffer_image.seek(0)
response = HttpResponse(buffer_image, content_type=rszio_response.headers['content-type'])
# Set cache headers
if hasattr(settings, 'RSZIO_CACHE_CONTROL'):
try:
response['Cache-Control'] = 'max-age={}'.format(int(settings.RSZIO_CACHE_CONTROL))
except:
response['Cache-Control'] = settings.RSZIO_CACHE_CONTROL
return response
| g3rd/django-rszio | rszio/views.py | Python | mit | 2,110 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask import jsonify
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask.ext.login import login_required
import json
from oldhawaii_metadata.apps.api import digital_assets_views
from oldhawaii_metadata.extensions import csrf
from oldhawaii_metadata.extensions import store
from .utilities import get_image_size_from_url
digital_assets = Blueprint(
'digital_assets',
__name__,
template_folder='templates',
url_prefix='/digital_assets')
@digital_assets.route('/')
@login_required
def index():
res = digital_assets_views.get_all()
json_response = json.loads(res.data)
dig_assets = json_response.get('_items', None) if json_response else None
return render_template('digital_assets/index.html',
digital_assets=dig_assets)
@digital_assets.route('/upload', methods=['GET'])
@login_required
def upload_digital_asset():
return render_template('digital_assets/upload_digital_asset.html')
@digital_assets.route('/link', methods=['GET'])
@login_required
def link_digital_asset():
return render_template('digital_assets/link_digital_asset.html')
@digital_assets.route('/<string:id>', methods=['GET'])
@login_required
def view_digital_asset(id):
res = digital_assets_views.read(id)
dig_asset = json.loads(res.data)
return render_template('digital_assets/view_digital_asset.html',
digital_asset=dig_asset)
@digital_assets.route('/<string:id>/edit', methods=['GET', 'POST'])
@login_required
def edit_digital_asset(id):
return render_template('digital_assets/edit_digital_asset.html',
digital_asset_id=id)
@digital_assets.route('/<string:id>/delete', methods=['POST'])
@login_required
def delete_digital_asset(id):
digital_assets_views.delete(id)
return redirect(url_for('digital_assets.index'))
@csrf.exempt
@digital_assets.route('/upload/content', methods=['POST'])
@login_required
def upload_digital_asset_content():
provider = store.Provider(request.files.get('file'))
provider.save()
width, height = get_image_size_from_url(provider.absolute_url)
return jsonify({"image_url": provider.absolute_url,
"image_width": width or '',
"image_height": height or ''})
# vim: filetype=python
| oldhawaii/oldhawaii-metadata | www/oldhawaii_metadata/apps/digital_assets/views.py | Python | mit | 2,440 |
"""
Test specific settings.
"""
import os
import logging
if os.getenv('TRAVIS') is None:
from django_envie.workroom import convertfiletovars
convertfiletovars()
from .base import *
logging.disable(logging.CRITICAL)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'test.db')
}
}
| andela-uawili/django-bucketlist-application | bucketlist/bucketlist/settings/testing.py | Python | mit | 382 |
"""
This module contains the functions necessary to extract data from text files
and put it into numpy arrays to be usable by other functions.
"""
import os
import re
import numpy as np
class IO():
"""An object that points to a desired file location and either extracts data
from an existing text file, or writes data to a text file.
"""
def __init__(self, filename='', dir=''):
self.fn = os.path.join(dir, filename)
if os.path.exists(self.fn):
#print("%s found." % self.fn)
pass
else:
print("%s does not yet exist." % self.fn)
def get_values(self, structure, exten, filecheck, extractor):
"""For a given structure, identifies all of the relevant, current log
files. Runs filecheck to verify convergence, and then uses the extractor
to acquire the desired values from the file. The values arereturned as a state:value dictionary.
"""
path = self.fn
# Narrows it down to the appropriate log files.
logs = self.files_end_with(exten)
# Unpacks filetypes.
ftypes = {file:self.getcalctype(file) for file in logs}
try:
iter, state, type = (zip(*ftypes.values()))
# Removes invalid and outdated files, marking the log.
stateiter = {'S':0, 'T':0, 'P':0, 'D':0, 'Q':0}
for i in range(len(iter)):
if iter[i] > stateiter[state[i]]:
stateiter[state[i]] = iter[i]
values = {
v[1]:extractor(k, path) for (k,v) in ftypes.items()
if v[0] == stateiter[v[1]] and filecheck(k, path)}
except ValueError as e:
if "not enough values" in str(e):
values = {}
else:
raise e
except KeyError:
print("Invalid file in %s directory." % structure)
values = {}
# Return values packed in a dictionary.
return values
def getcalctype(self, file):
"""Takes a chemical computation file and gives the calc type labels,
based on the filename formulation: xxxxxxx_NSyyy.log, where x chars
refer to the structure name, N is the iteration number, S is the spin
state label, and yyy is the optimization type.
"""
labels = file.split('_')[-1]
iter = int(labels[0])
state = labels[1]
type = labels.split('.')[0][2:]
return (iter, state, type)
def appendline(self, line):
"""Useful for writing log files.
"""
with open(self.fn, 'a') as f:
f.write(line + '\n')
def files_end_with(self, suffix):
"""Returns a list of files ending with the given suffix.
"""
return list(filter(
lambda file: file.endswith(suffix),
os.listdir(self.fn)
))
def tail(self, lines=1, _buffer=4098):
"""Tail a file and get X lines from the end"""
with open(self.fn) as f:
# place holder for the lines found
lines_found = []
# block counter will be multiplied by buffer
# to get the block size from the end
block_counter = -1
# loop until we find X lines
while len(lines_found) < lines:
try:
f.seek(block_counter * _buffer, os.SEEK_END)
except IOError: # either file is too small, or too many lines requested
f.seek(0)
lines_found = f.readlines()
break
lines_found = f.readlines()
# we found enough lines, get out
if len(lines_found) > lines:
break
# decrement the block counter to get the
# next X bytes
block_counter -= 1
return lines_found[-lines:]
def head(self, lines=1):
"""Head a file and get X lines from the beginning"""
with open(self.fn) as f:
head = [next(f) for x in range(lines)]
return head
def lines(self):
"""Gives all lines from a file as a list"""
with open(self.fn, "rt", encoding='latin-1') as f:
lines = f.read().splitlines()
return list(lines)
def replace_vals(self, starttxt, endtxt, outfile):
"""Replaces all instances of starttxt with endtxt, printing the file as
outfile.
"""
with open(self.fn, "rt") as fin:
with open(outfile, "wt") as fout:
for line in fin:
fout.write(line.replace(starttxt, endtxt))
def replace_all_vals(self, keydict, outfile):
"""Same as replace_vals, but takes a dictionary of old:new text
replacements as the key.
"""
with open(self.fn, "rt") as fin:
with open(outfile, "wt") as fout:
for line in fin:
for old, new in keydict.items():
line = line.replace(old, new)
fout.write(line)
def extract_floats(str):
"""Takes a string and returns a list of floats in that string.
"""
return [float(n) for n in re.findall(r"[-+]?\d*\.\d+|\d+", str)]
def dict_values(dicts):
"""Takes a list of dictionaries and returns a list comprised of those
dictionary's values.
"""
results = []
for dict in dicts:
if dict:
results.append(*dict.values())
else:
results.append({})
return results | tristanbrown/whaler | whaler/dataprep.py | Python | mit | 5,735 |
from django.test import TestCase
from bluebox.converters import Sanitizer
class BlueboxSanitizerTestCase(TestCase):
def test_strip_tags(self):
test_html = ('<div><p>HelloWorld<b>boldtext</b></p>'
'<img src="http://bukk.it/l2internet2.gif" /></div>')
expected_output = "<div><p>HelloWorld</p></div>"
sanitizer = Sanitizer(tags=['img', 'b'])
self.assertEqual(expected_output, sanitizer.strip(content=test_html))
def test_remove_blacklisted_protocols(self):
test_html = '<a href="javascript:;"></a>'
expected_output = "<a></a>"
sanitizer = Sanitizer(protocols=['javascript'])
self.assertEqual(expected_output, sanitizer.strip(content=test_html))
def test_dont_remove_nonblacklisted_protocols(self):
test_html = '<a href="http://google.com"></a>'
sanitizer = Sanitizer(protocols=['javascript'])
self.assertEqual(test_html, sanitizer.strip(content=test_html))
def test_remove_blacklisted_attributes(self):
test_html = '<div style="color: papayawhip;" width="100%"></div>'
expected_output = '<div width="100%"></div>'
sanitizer = Sanitizer(attributes=['style'])
self.assertEqual(expected_output, sanitizer.strip(content=test_html))
| duner/django-bluebox | bluebox/tests/test_converter.py | Python | mit | 1,289 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 by Gaik Tamazian
# gaik (dot) tamazian (at) gmail (dot) com
import chromosomer.cli
import os
import shutil
import sys
import tempfile
import unittest
class TestFragmentSimulator(unittest.TestCase):
def setUp(self):
self.__output_dir = tempfile.NamedTemporaryFile().name
os.mkdir(self.__output_dir)
def test_simulator(self):
sys.argv = ['', 'simulator', '-g', '10', '-p', '5',
'--prefix', 'test_', '20', '100', '5',
self.__output_dir]
chromosomer.cli.chromosomer()
def tearDown(self):
if os.path.isdir(self.__output_dir):
shutil.rmtree(self.__output_dir)
| gtamazian/Chromosomer | tests/test_simulator.py | Python | mit | 731 |
#!/usr/bin/env python
import sys
import six
import agate
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.csvstat import CSVStat, launch_new_instance
from tests.utils import CSVKitTestCase, ColumnsTests, EmptyFileTests, NamesTests
class TestCSVStat(CSVKitTestCase, ColumnsTests, EmptyFileTests, NamesTests):
Utility = CSVStat
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/dummy.csv']):
launch_new_instance()
def test_runs(self):
self.get_output(['examples/test_utf8.csv'])
def test_columns(self):
output = self.get_output(['-c', '2', 'examples/testxls_converted.csv'])
self.assertNotIn('1. "text"', output)
self.assertIn('2. "date"', output)
def test_encoding(self):
self.get_output(['-e', 'latin1', 'examples/test_latin1.csv'])
def test_linenumbers(self):
output = self.get_output(['-c', '2', '--linenumbers', 'examples/dummy.csv'])
self.assertNotIn('1. "a"', output)
self.assertIn('2. "b"', output)
def test_no_header_row(self):
output = self.get_output(['-c', '2', '--no-header-row', 'examples/no_header_row.csv'])
self.assertNotIn('1. "a"', output)
self.assertIn('2. "b"', output)
def test_count_only(self):
output = self.get_output(['--count', 'examples/realdata/ks_1033_data.csv'])
self.assertEqual(output, 'Row count: 1575\n')
def test_unique(self):
output = self.get_output(['-c', 'county', 'examples/realdata/ks_1033_data.csv'])
six.assertRegex(self, output, r'Unique values:\s+73')
def test_max_length(self):
output = self.get_output(['-c', 'county', 'examples/realdata/ks_1033_data.csv'])
six.assertRegex(self, output, r'Longest value:\s+12')
def test_freq_list(self):
output = self.get_output(['examples/realdata/ks_1033_data.csv'])
self.assertIn('WYANDOTTE (123x)', output)
self.assertIn('SALINE (59x)', output)
self.assertNotIn('MIAMI (56x)', output)
def test_freq(self):
output = self.get_output(['examples/realdata/ks_1033_data.csv', '--freq'])
self.assertIn(' 1. state: { "KS": 1575 }', output)
def test_freq_count(self):
output = self.get_output(['examples/realdata/ks_1033_data.csv', '--freq-count', '1'])
self.assertIn('WYANDOTTE (123x)', output)
self.assertNotIn('FINNEY (103x)', output)
self.assertNotIn('MIAMI (56x)', output)
def test_csv(self):
output = self.get_output_as_io(['--csv', 'examples/realdata/ks_1033_data.csv'])
reader = agate.csv.reader(output)
header = next(reader)
self.assertEqual(header[1], 'column_name')
self.assertEqual(header[4], 'unique')
row = next(reader)
self.assertEqual(row[1], 'state')
self.assertEqual(row[2], 'Text')
self.assertEqual(row[5], '')
self.assertEqual(row[11], '2')
def test_csv_columns(self):
output = self.get_output_as_io(['--csv', '-c', '4', 'examples/realdata/ks_1033_data.csv'])
reader = agate.csv.reader(output)
header = next(reader)
self.assertEqual(header[1], 'column_name')
self.assertEqual(header[4], 'unique')
row = next(reader)
self.assertEqual(row[1], 'nsn')
self.assertEqual(row[2], 'Text')
self.assertEqual(row[5], '')
self.assertEqual(row[11], '16')
| onyxfish/csvkit | tests/test_utilities/test_csvstat.py | Python | mit | 3,548 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'profilerdock.ui'
#
# Created: Tue Apr 15 11:26:58 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_profilerdock(object):
def setupUi(self, profilerdock):
profilerdock.setObjectName("profilerdock")
profilerdock.resize(234, 468)
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout = QtGui.QGridLayout(self.dockWidgetContents)
self.gridLayout.setObjectName("gridLayout")
self.groupBox = QtGui.QGroupBox(self.dockWidgetContents)
self.groupBox.setEnabled(False)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.shinyProfiler = QtGui.QCheckBox(self.groupBox)
self.shinyProfiler.setObjectName("shinyProfiler")
self.verticalLayout.addWidget(self.shinyProfiler)
self.shinyUpdateBtn = QtGui.QPushButton(self.groupBox)
self.shinyUpdateBtn.setEnabled(False)
self.shinyUpdateBtn.setObjectName("shinyUpdateBtn")
self.verticalLayout.addWidget(self.shinyUpdateBtn)
self.shinyClearBtn = QtGui.QPushButton(self.groupBox)
self.shinyClearBtn.setEnabled(False)
self.shinyClearBtn.setObjectName("shinyClearBtn")
self.verticalLayout.addWidget(self.shinyClearBtn)
self.shinyFlatBtn = QtGui.QPushButton(self.groupBox)
self.shinyFlatBtn.setEnabled(False)
self.shinyFlatBtn.setObjectName("shinyFlatBtn")
self.verticalLayout.addWidget(self.shinyFlatBtn)
self.shinyTreeBtn = QtGui.QPushButton(self.groupBox)
self.shinyTreeBtn.setEnabled(False)
self.shinyTreeBtn.setObjectName("shinyTreeBtn")
self.verticalLayout.addWidget(self.shinyTreeBtn)
self.gridLayout.addWidget(self.groupBox, 2, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 3, 0, 1, 1)
self.actionProfiler = QtGui.QCheckBox(self.dockWidgetContents)
self.actionProfiler.setObjectName("actionProfiler")
self.gridLayout.addWidget(self.actionProfiler, 0, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 10, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem1, 1, 0, 1, 1)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 2, 1, 1, 1)
profilerdock.setWidget(self.dockWidgetContents)
self.retranslateUi(profilerdock)
QtCore.QObject.connect(self.actionProfiler, QtCore.SIGNAL("toggled(bool)"), profilerdock.toggleActionProfiling)
QtCore.QObject.connect(self.shinyProfiler, QtCore.SIGNAL("toggled(bool)"), profilerdock.toggleShiny)
QtCore.QObject.connect(self.shinyClearBtn, QtCore.SIGNAL("clicked()"), profilerdock.shinyClear)
QtCore.QObject.connect(self.shinyFlatBtn, QtCore.SIGNAL("clicked()"), profilerdock.shinyFlatReport)
QtCore.QObject.connect(self.shinyTreeBtn, QtCore.SIGNAL("clicked()"), profilerdock.shinyTreeReport)
QtCore.QObject.connect(self.shinyProfiler, QtCore.SIGNAL("toggled(bool)"), self.shinyClearBtn.setEnabled)
QtCore.QObject.connect(self.shinyProfiler, QtCore.SIGNAL("toggled(bool)"), self.shinyFlatBtn.setEnabled)
QtCore.QObject.connect(self.shinyProfiler, QtCore.SIGNAL("toggled(bool)"), self.shinyTreeBtn.setEnabled)
QtCore.QObject.connect(self.shinyProfiler, QtCore.SIGNAL("toggled(bool)"), self.shinyUpdateBtn.setEnabled)
QtCore.QObject.connect(self.shinyUpdateBtn, QtCore.SIGNAL("clicked()"), profilerdock.shinyUpdate)
QtCore.QMetaObject.connectSlotsByName(profilerdock)
def retranslateUi(self, profilerdock):
profilerdock.setWindowTitle(QtGui.QApplication.translate("profilerdock", "Profiler", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("profilerdock", "Shiny Profiler", None, QtGui.QApplication.UnicodeUTF8))
self.shinyProfiler.setText(QtGui.QApplication.translate("profilerdock", "Enabled", None, QtGui.QApplication.UnicodeUTF8))
self.shinyUpdateBtn.setText(QtGui.QApplication.translate("profilerdock", "Update", None, QtGui.QApplication.UnicodeUTF8))
self.shinyClearBtn.setText(QtGui.QApplication.translate("profilerdock", "Clear", None, QtGui.QApplication.UnicodeUTF8))
self.shinyFlatBtn.setText(QtGui.QApplication.translate("profilerdock", "Flat Report", None, QtGui.QApplication.UnicodeUTF8))
self.shinyTreeBtn.setText(QtGui.QApplication.translate("profilerdock", "Tree Report", None, QtGui.QApplication.UnicodeUTF8))
self.actionProfiler.setText(QtGui.QApplication.translate("profilerdock", "Action Profiling", None, QtGui.QApplication.UnicodeUTF8))
| Vavius/moai-ide | editor/layout/profilerdock_ui.py | Python | mit | 5,122 |
#!/usr/bin/env python
"""Installer for gemini: a lightweight db framework for disease and population genetics.
https://github.com/arq5x/gemini
Handles installation of:
- Required third party software
- Required Python libraries
- Gemini application
- Associated data files
Requires: Python 2.7, git, and compilers (gcc, g++)
Run gemini_install.py -h for usage.
"""
import argparse
import platform
import os
import shutil
import subprocess
import sys
remotes = {"requirements":
"https://raw.githubusercontent.com/bgruening/gemini-versioned-install/master/requirements_0.9.1.txt",
"cloudbiolinux":
"https://github.com/chapmanb/cloudbiolinux.git",
"gemini":
"https://github.com/arq5x/gemini.git",
"anaconda":
"http://repo.continuum.io/miniconda/Miniconda-3.5.5-%s-x86_64.sh"}
def main(args):
check_dependencies()
work_dir = os.path.join(os.getcwd(), "tmpgemini_install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
print "Installing isolated base python installation"
make_dirs(args)
anaconda = install_anaconda_python(args, remotes)
print "Installing gemini..."
install_conda_pkgs(anaconda)
gemini = install_gemini(anaconda, remotes, args.datadir, args.tooldir, args.sudo)
if args.install_tools:
cbl = get_cloudbiolinux(remotes["cloudbiolinux"])
fabricrc = write_fabricrc(cbl["fabricrc"], args.tooldir, args.datadir,
"ubuntu", args.sudo)
print "Installing associated tools..."
install_tools(gemini["fab"], cbl["tool_fabfile"], fabricrc)
os.chdir(work_dir)
install_data(gemini["python"], gemini["data_script"], args)
os.chdir(work_dir)
test_script = install_testbase(args.datadir, remotes["gemini"], gemini)
print "Finished: gemini, tools and data installed"
print " Tools installed in:\n %s" % args.tooldir
print " Data installed in:\n %s" % args.datadir
print " Run tests with:\n cd %s && bash %s" % (os.path.dirname(test_script),
os.path.basename(test_script))
print " NOTE: be sure to add %s/bin to your PATH." % args.tooldir
print " NOTE: Install data files for GERP_bp & CADD_scores (not installed by default).\n "
shutil.rmtree(work_dir)
def install_gemini(anaconda, remotes, datadir, tooldir, use_sudo):
"""Install gemini plus python dependencies inside isolated Anaconda environment.
"""
# Work around issue with distribute where asks for 'distribute==0.0'
# try:
# subprocess.check_call([anaconda["easy_install"], "--upgrade", "distribute"])
# except subprocess.CalledProcessError:
# try:
# subprocess.check_call([anaconda["pip"], "install", "--upgrade", "distribute"])
# except subprocess.CalledProcessError:
# pass
# Ensure latest version of fabric for running CloudBioLinux
subprocess.check_call([anaconda["pip"], "install", "fabric>=1.7.0"])
# allow downloads excluded in recent pip (1.5 or greater) versions
try:
p = subprocess.Popen([anaconda["pip"], "--version"], stdout=subprocess.PIPE)
pip_version = p.communicate()[0].split()[1]
except:
pip_version = ""
pip_compat = []
if pip_version >= "1.5":
for req in ["python-graph-core", "python-graph-dot"]:
pip_compat += ["--allow-external", req, "--allow-unverified", req]
subprocess.check_call([anaconda["pip"], "install"] + pip_compat + ["-r", remotes["requirements"]])
python_bin = os.path.join(anaconda["dir"], "bin", "python")
_cleanup_problem_files(anaconda["dir"])
_add_missing_inits(python_bin)
for final_name, ve_name in [("gemini", "gemini"), ("gemini_python", "python"),
("gemini_pip", "pip")]:
final_script = os.path.join(tooldir, "bin", final_name)
ve_script = os.path.join(anaconda["dir"], "bin", ve_name)
sudo_cmd = ["sudo"] if use_sudo else []
if os.path.lexists(final_script):
subprocess.check_call(sudo_cmd + ["rm", "-f", final_script])
else:
subprocess.check_call(sudo_cmd + ["mkdir", "-p", os.path.dirname(final_script)])
cmd = ["ln", "-s", ve_script, final_script]
subprocess.check_call(sudo_cmd + cmd)
library_loc = subprocess.check_output("%s -c 'import gemini; print gemini.__file__'" % python_bin,
shell=True)
return {"fab": os.path.join(anaconda["dir"], "bin", "fab"),
"data_script": os.path.join(os.path.dirname(library_loc.strip()), "install-data.py"),
"python": python_bin,
"cmd": os.path.join(anaconda["dir"], "bin", "gemini")}
def install_conda_pkgs(anaconda):
pkgs = ["bx-python=0.7.2", "conda=3.5.5", "cython=0.20.1", "ipython=2.1.0", "jinja2=2.7.2", "nose=1.3.3", "numpy=1.8.1",
"pip=1.5.6", "pycrypto=2.6.1", "pyparsing=2.0.1", "pysam=0.8.0", "pyyaml=3.11",
"pyzmq=14.3.0", "pandas=0.14.0", "scipy=0.14.0"]
channels = ["-c", "https://conda.binstar.org/bcbio"]
#subprocess.check_call([anaconda["conda"], "install", "--yes", "numpy"])
subprocess.check_call([anaconda["conda"], "install", "--yes"] + channels + pkgs)
def install_anaconda_python(args, remotes):
"""Provide isolated installation of Anaconda python.
http://docs.continuum.io/anaconda/index.html
"""
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
conda = os.path.join(bindir, "conda")
if platform.mac_ver()[0]:
distribution = "macosx"
else:
distribution = "linux"
if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
if os.path.exists(anaconda_dir):
shutil.rmtree(anaconda_dir)
url = remotes["anaconda"] % ("MacOSX" if distribution == "macosx" else "Linux")
if not os.path.exists(os.path.basename(url)):
subprocess.check_call(["wget", url])
subprocess.check_call("bash %s -b -p %s" %
(os.path.basename(url), anaconda_dir), shell=True)
return {"conda": conda,
"pip": os.path.join(bindir, "pip"),
"easy_install": os.path.join(bindir, "easy_install"),
"dir": anaconda_dir}
def _add_missing_inits(python_bin):
"""pip/setuptools strips __init__.py files with namespace declarations.
I have no idea why, but this adds them back.
"""
library_loc = subprocess.check_output("%s -c 'import pygraph.classes.graph; "
"print pygraph.classes.graph.__file__'" % python_bin,
shell=True)
pygraph_init = os.path.normpath(os.path.join(os.path.dirname(library_loc.strip()), os.pardir,
"__init__.py"))
if not os.path.exists(pygraph_init):
with open(pygraph_init, "w") as out_handle:
out_handle.write("__import__('pkg_resources').declare_namespace(__name__)\n")
def _cleanup_problem_files(venv_dir):
"""Remove problem bottle items in PATH which conflict with site-packages
"""
for cmd in ["bottle.py", "bottle.pyc"]:
bin_cmd = os.path.join(venv_dir, "bin", cmd)
if os.path.exists(bin_cmd):
os.remove(bin_cmd)
def install_tools(fab_cmd, fabfile, fabricrc):
"""Install 3rd party tools used by Gemini using a custom CloudBioLinux flavor.
"""
tools = ["tabix", "grabix", "samtools", "bedtools"]
flavor_dir = os.path.join(os.getcwd(), "gemini-flavor")
if not os.path.exists(flavor_dir):
os.makedirs(flavor_dir)
with open(os.path.join(flavor_dir, "main.yaml"), "w") as out_handle:
out_handle.write("packages:\n")
out_handle.write(" - bio_nextgen\n")
out_handle.write("libraries:\n")
with open(os.path.join(flavor_dir, "custom.yaml"), "w") as out_handle:
out_handle.write("bio_nextgen:\n")
for tool in tools:
out_handle.write(" - %s\n" % tool)
cmd = [fab_cmd, "-f", fabfile, "-H", "localhost", "-c", fabricrc,
"install_biolinux:target=custom,flavor=%s" % flavor_dir]
subprocess.check_call(cmd)
def install_data(python_cmd, data_script, args):
"""Install biological data used by gemini.
"""
data_dir = os.path.join(args.datadir, "gemini_data") if args.sharedpy else args.datadir
cmd = [python_cmd, data_script, data_dir]
if args.install_data:
print "Installing gemini data..."
else:
cmd.append("--nodata")
subprocess.check_call(cmd)
def install_testbase(datadir, repo, gemini):
"""Clone or update gemini code so we have the latest test suite.
"""
gemini_dir = os.path.join(datadir, "gemini")
cur_dir = os.getcwd()
needs_git = True
if os.path.exists(gemini_dir):
os.chdir(gemini_dir)
try:
subprocess.check_call(["git", "pull", "origin", "master", "--tags"])
needs_git = False
except:
os.chdir(cur_dir)
shutil.rmtree(gemini_dir)
if needs_git:
os.chdir(os.path.split(gemini_dir)[0])
subprocess.check_call(["git", "clone", repo])
os.chdir(gemini_dir)
_update_testdir_revision(gemini["cmd"])
os.chdir(cur_dir)
return os.path.join(gemini_dir, "master-test.sh")
def _update_testdir_revision(gemini_cmd):
"""Update test directory to be in sync with a tagged installed version or development.
"""
try:
p = subprocess.Popen([gemini_cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
gversion = p.communicate()[0].split()[1]
except:
gversion = ""
tag = ""
if gversion:
try:
p = subprocess.Popen("git tag -l | grep %s" % gversion, stdout=subprocess.PIPE, shell=True)
tag = p.communicate()[0].strip()
except:
tag = ""
if tag:
subprocess.check_call(["git", "checkout", "tags/%s" % tag])
pass
else:
subprocess.check_call(["git", "reset", "--hard", "HEAD"])
def write_fabricrc(base_file, tooldir, datadir, distribution, use_sudo):
out_file = os.path.join(os.getcwd(), os.path.basename(base_file))
with open(base_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("system_install"):
line = "system_install = %s\n" % tooldir
elif line.startswith("local_install"):
line = "local_install = %s/install\n" % tooldir
elif line.startswith("data_files"):
line = "data_files = %s\n" % datadir
elif line.startswith("distribution"):
line = "distribution = %s\n" % distribution
elif line.startswith("use_sudo"):
line = "use_sudo = %s\n" % use_sudo
elif line.startswith("edition"):
line = "edition = minimal\n"
elif line.startswith("#galaxy_home"):
line = "galaxy_home = %s\n" % os.path.join(datadir, "galaxy")
out_handle.write(line)
return out_file
def make_dirs(args):
sudo_cmd = ["sudo"] if args.sudo else []
for dname in [args.datadir, args.tooldir]:
if not os.path.exists(dname):
subprocess.check_call(sudo_cmd + ["mkdir", "-p", dname])
username = subprocess.check_output("echo $USER", shell=True).strip()
subprocess.check_call(sudo_cmd + ["chown", username, dname])
def get_cloudbiolinux(repo):
base_dir = os.path.join(os.getcwd(), "cloudbiolinux")
if not os.path.exists(base_dir):
subprocess.check_call(["git", "clone", repo])
return {"fabricrc": os.path.join(base_dir, "config", "fabricrc.txt"),
"tool_fabfile": os.path.join(base_dir, "fabfile.py")}
def check_dependencies():
"""Ensure required tools for installation are present.
"""
print "Checking required dependencies..."
for cmd, url in [("git", "http://git-scm.com/"),
("wget", "http://www.gnu.org/software/wget/"),
("curl", "http://curl.haxx.se/")]:
try:
retcode = subprocess.call([cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
retcode = 127
if retcode == 127:
raise OSError("gemini requires %s (%s)" % (cmd, url))
else:
print " %s found" % cmd
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Automated installer for gemini framework.")
parser.add_argument("tooldir", help="Directory to install 3rd party software tools",
type=os.path.abspath)
parser.add_argument("datadir", help="Directory to install gemini data files",
type=os.path.abspath)
parser.add_argument("--nosudo", help="Specify we cannot use sudo for commands",
dest="sudo", action="store_false", default=True)
parser.add_argument("--notools", help="Do not install tool dependencies",
dest="install_tools", action="store_false", default=True)
parser.add_argument("--nodata", help="Do not install data dependencies",
dest="install_data", action="store_false", default=True)
parser.add_argument("--sharedpy", help=("Indicate we share an Anaconda Python directory with "
"another project. Creates unique gemini data directory."),
action="store_true", default=False)
if len(sys.argv) == 1:
parser.print_help()
else:
main(parser.parse_args())
| bgruening/gemini-versioned-install | gemini_install_0.9.1.py | Python | mit | 13,871 |
import click
from ghutil.showing import print_json
from ghutil.types import Repository
@click.command()
@Repository.option(
"-R",
"--repo",
"--repository",
"repo",
help="Repository to which the milestones belong",
)
@click.option("-v", "--verbose", is_flag=True, help="Show full response body")
@click.argument("milestone", nargs=-1)
def cli(repo, milestone, verbose):
"""Show milestone details"""
print_json(map(repo.milestone, milestone), verbose)
| jwodder/ghutil | src/ghutil/cli/milestone/show.py | Python | mit | 480 |
Subsets and Splits