repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
Uberi/The-Mippits | mippits.py | 1 | 12759 | #!/usr/bin/env python3
def normalize(value):
return value & 0xFFFFFFFF
def signed(value):
return value - 0x100000000 if value & 0x80000000 else value
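# Editor's sketch (helper name is hypothetical, not part of the original emulator): a tiny
# self-check illustrating the 32-bit wrap-around and two's-complement reinterpretation
# performed by the two helpers above.
def _demo_normalize_signed():
    assert normalize(0xFFFFFFFF + 1) == 0            # arithmetic wraps to an unsigned 32-bit word
    assert signed(0xFFFFFFFF) == -1                  # high bit set -> negative two's-complement value
    assert signed(0x7FFFFFFF) == 0x7FFFFFFF          # positive values pass through unchanged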
# from http://code.activestate.com/recipes/577977-get-single-keypress/, MIT licensed
try:
import tty, termios
except ImportError:
# Probably Windows.
try: import msvcrt
except ImportError: raise ImportError("getch not available")
else: getch = msvcrt.getch
else:
import sys
def getch():
"""
getch() -> key character
Read a single keypress from stdin and return the resulting character. Nothing is echoed to the console. This call will block if a keypress is not already available, but will not wait for Enter to be pressed.
If the pressed key was a modifier key, nothing will be detected; if it was a special function key, it may return the first character of an escape sequence, leaving additional characters in the buffer.
"""
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class Mippit:
def __init__(self):
self.registers = [0] * 32
self.PC = 0
self.HI, self.LO = 0, 0
self.MEM = {}
self.offset = self.PC
self.tracing = False
def trace(self, instruction, comment = None):
if not self.tracing: return # tracing disabled
if comment is None:
print(instruction)
else:
print("[DEBUGGER] {:=#010x} {:<20}; {}".format(self.offset, instruction, comment))
def decode_execute(self, instruction):
r = self.registers
r[0] = 0 # reset the 0 register
d, s, t = (instruction >> 11) & 0b11111, (instruction >> 21) & 0b11111, (instruction >> 16) & 0b11111
i = instruction & 0b1111111111111111
if i & 0x8000: i -= 0x10000 # make sure we interpret the value as a signed 16 bit integer
if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100000: # add (add)
r[d] = normalize(r[s] + r[t])
self.trace("add ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
elif instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100010: # subtract (sub)
r[d] = normalize(r[s] - r[t])
self.trace("sub ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011000: # multiply (mult)
result = signed(r[s]) * signed(r[t])
self.HI, self.LO = normalize(result >> 32), normalize(result)
self.trace("mult ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011001: # multiply unsigned (multu)
result = r[s] * r[t]
self.HI, self.LO = normalize(result >> 32), normalize(result)
self.trace("multu ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011010: # divide (div)
self.HI, self.LO = normalize(signed(r[s]) % signed(r[t])), normalize(signed(r[s]) // signed(r[t]))
self.trace("div ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011011: # divide unsigned (divu)
self.HI, self.LO = r[s] % r[t], r[s] // r[t]
self.trace("divu ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
elif instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010000: # move from high/remainder (mfhi)
r[d] = self.HI
self.trace("mfhi ${}".format(d), "${}={}".format(d, r[d]))
elif instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010010: # move from low/quotient (mflo)
r[d] = self.LO
self.trace("mflo ${}".format(d), "${}={}".format(d, r[d]))
elif instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010100: # load immediate and skip (lis)
assert self.PC % 4 == 0
r[d] = self.MEM[self.PC // 4] if self.PC // 4 in self.MEM else 0
self.PC = normalize(self.PC + 4)
self.trace("lis ${}".format(d), "${}={}".format(d, r[d]))
self.trace(".word {}".format(r[d]))
elif instruction & 0b11111100000000000000000000000000 == 0b10001100000000000000000000000000: # load word (lw)
address = normalize(r[s] + i)
assert address % 4 == 0
if address == 0xFFFF0004: # read from stdin
value = ord(getch())
assert 0 <= value <= 255, "Invalid character entered - character must be ASCII"
r[t] = value
else: r[t] = self.MEM[address // 4] if address // 4 in self.MEM else 0
self.trace("lw ${}, {}(${})".format(t, i, s), "${}={}, ${}={}".format(t, r[t], s, r[s]))
elif instruction & 0b11111100000000000000000000000000 == 0b10101100000000000000000000000000: # store word (sw)
address = normalize(r[s] + i)
assert address % 4 == 0, "Invalid address - not aligned to word boundary."
if address == 0xFFFF000C: # write to stdout
print(chr(r[t] & 0xFF), end="")
else: self.MEM[address // 4] = r[t]
self.trace("sw ${}, {}(${})".format(t, i, s), "${}={}, ${}={}".format(t, r[t], s, r[s]))
elif instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000101010: # set less than (slt)
r[d] = 1 if signed(r[s]) < signed(r[t]) else 0
self.trace("slt ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
elif instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000101011: # set less than unsigned (sltu)
r[d] = 1 if r[s] < r[t] else 0
self.trace("sltu ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
elif instruction & 0b11111100000000000000000000000000 == 0b00010000000000000000000000000000: # branch on equal (beq)
if r[s] == r[t]: self.PC = normalize(self.PC + i * 4)
self.trace("beq ${}, ${}, {}".format(s, t, i), "${}={}, ${}={}".format(s, r[s], t, r[t]))
elif instruction & 0b11111100000000000000000000000000 == 0b00010100000000000000000000000000: # branch on not equal (bne)
if r[s] != r[t]: self.PC = normalize(self.PC + i * 4)
self.trace("bne ${}, ${}, {}".format(s, t, i), "${}={}, ${}={}".format(s, r[s], t, r[t]))
elif instruction & 0b11111100000111111111111111111111 == 0b00000000000000000000000000001000: # jump register (jr)
self.PC = r[s]
self.trace("jr ${}".format(s), "${}={}".format(s, r[s]))
elif instruction & 0b11111100000111111111111111111111 == 0b00000000000000000000000000001001: # jump and link register (jalr)
temp = r[s]
r[31] = self.PC
self.PC = temp
self.trace("jalr ${}".format(s), "${}={}".format(s, r[s]))
else: raise ValueError("Unknown instruction: {:=#010x}".format(instruction))
def load(self, code, offset = 0): # load binary code into memory
assert offset % 4 == 0, "Invalid offset - offset must be aligned to 32-bit word boundary"
offset //= 4 # get the offset in words
for i, word in enumerate(code_to_words(code)): self.MEM[i + offset] = word # copy the code into memory
self.registers[30] = 0x00000000
self.registers[31] = 0xFFFFFFFF
def load_hex(self, hex_code, offset = 0): # load hex code into memory
assert offset % 4 == 0, "Invalid offset - offset must be aligned to 32-bit word boundary"
offset //= 4
for i, word in enumerate(hex_to_words(hex_code)): self.MEM[i + offset] = word # copy the code into memory
self.registers[30] = 0x00000000
self.registers[31] = 0xFFFFFFFF
def step(self):
if self.PC == 0xFFFFFFFF: return False # jumped past end of memory, program ended
assert self.PC % 4 == 0, "Program counter must be aligned to word boundaries"
instruction = self.MEM[self.PC // 4] if self.PC // 4 in self.MEM else 0
self.offset = self.PC
self.PC = normalize(self.PC + 4)
self.decode_execute(instruction)
return True
def run(self, offset = 0):
self.PC = offset
while self.step(): pass
def code_to_words(code):
assert len(code) % 4 == 0, "Invalid code length - machine code must be collection of 32-bit words"
import struct
return [struct.unpack(">I", code[i * 4:i * 4 + 4])[0] for i in range(0, len(code) // 4)] # load each 4 bytes as an unsigned big-endian 32-bit word (consistent with hex_to_words)
def hex_to_words(hex_code):
assert len(hex_code) % 8 == 0, "Invalid code length - machine code must be collection of 32-bit words"
return [int(hex_code[i * 8:i * 8 + 8], 16) for i in range(0, len(hex_code) // 8)]
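# Editor's sketch (demo function is hypothetical, not part of the original file): hex_to_words
# splits the hex string into 8-character chunks and parses each one as a big-endian 32-bit word.
def _demo_hex_to_words():
    assert hex_to_words("0000002003e00008") == [0x00000020, 0x03E00008]   # two 32-bit words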
def decode(instruction):
d, s, t = (instruction >> 11) & 0b11111, (instruction >> 21) & 0b11111, (instruction >> 16) & 0b11111
i = instruction & 0b1111111111111111
if i & 0x8000: i -= 0x10000 # make sure we interpret the value as a signed 16 bit integer
if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100000: # add (add)
return "add ${}, ${}, ${}".format(d, s, t)
if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100010: # subtract (sub)
return "sub ${}, ${}, ${}".format(d, s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011000: # multiply (mult)
return "mult ${}, ${}".format(s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011001: # multiply unsigned (multu)
return "multu ${}, ${}".format(s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011010: # divide (div)
return "div ${}, ${}".format(s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011011: # divide unsigned (divu)
return "divu ${}, ${}".format(s, t)
if instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010000: # move from high/remainder (mfhi)
return "mfhi ${}".format(d)
if instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010010: # move from low/quotient (mflo)
return "mflo ${}".format(d)
if instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010100: # load immediate and skip (lis)
return "lis ${}".format(d)
if instruction & 0b11111100000000000000000000000000 == 0b10001100000000000000000000000000: # load word (lw)
return "lw ${}, {}(${})".format(t, i, s)
if instruction & 0b11111100000000000000000000000000 == 0b10101100000000000000000000000000: # store word (sw)
return "sw ${}, {}(${})".format(t, i, s)
if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000101010: # set less than (slt)
return "slt ${}, ${}, ${}".format(d, s, t)
if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000101011: # set less than unsigned (sltu)
return "sltu ${}, ${}, ${}".format(d, s, t)
if instruction & 0b11111100000000000000000000000000 == 0b00010000000000000000000000000000: # branch on equal (beq)
return "beq ${}, ${}, {}".format(s, t, i)
if instruction & 0b11111100000000000000000000000000 == 0b00010100000000000000000000000000: # branch on not equal (bne)
return "bne ${}, ${}, {}".format(s, t, i)
if instruction & 0b11111100000111111111111111111111 == 0b00000000000000000000000000001000: # jump register (jr)
return "jr ${}".format(s)
if instruction & 0b11111100000111111111111111111111 == 0b00000000000000000000000000001001: # jump and link register (jalr)
return "jalr ${}".format(s)
return ".word 0x{:X}".format(instruction)
if __name__ == "__main__":
mips = Mippit()
mips.load_hex("00201820004008200060102003e00008")
mips.registers[1], mips.registers[2] = 3, 4
mips.run()
print(mips.registers)
| mit | 1,875,588,031,979,448,800 | 59.757143 | 216 | 0.61243 | false |
julietbravo/microhh | schemes/laplbcneu.py | 5 | 3758 | from pylab import *
xsize = 0.5
igc = 3
def geterror(u, uref, n=2):
error = sqrt(sum((uref - u)**2.) / u.size)
return error
def refdata(n):
x = linspace(0.5*xsize/n, xsize-0.5*xsize/n, n)
u = cos(2.*pi*x/xsize)
return x, u
def laplx2nd(x, u):
istart = igc
iend = u.size + igc
dx = xsize / u.size
laplu = zeros(u.size)
ucalc = zeros(u.size + 2*igc)
ucalc[istart:iend] = u[:]
# # periodic bcs
# ucalc[0 :igc ] = u[u.size-igc:u.size]
# ucalc[iend:iend+igc] = u[0:igc]
# non-periodic bc
ucalc[istart-1] = ucalc[istart]
ucalc[iend ] = ucalc[iend-1]
for i in range(istart, iend):
laplu[i-igc] = (ucalc[i-1] - 2.*ucalc[i] + ucalc[i+1]) / (dx**2.)
lapluref = -4*pi**2./xsize**2. * cos(2.*pi*x/xsize)
erru = geterror(laplu, lapluref)
return laplu, lapluref, erru
def laplx4th(x, u):
istart = igc
iend = u.size + igc
dx = xsize / u.size
laplu = zeros(u.size)
ucalc = zeros(u.size + 2*igc)
ucalc[istart:iend] = u[:]
# periodic bcs
#ucalc[0 :igc ] = u[u.size-igc:u.size]
#ucalc[iend:iend+igc] = u[0:igc]
# ghost cell
#ucalc[istart-1] = (21.*ucalc[istart] + 3.*ucalc[istart+1] - ucalc[istart+2]) / 23.
#ucalc[iend ] = (21.*ucalc[iend-1] + 3.*ucalc[iend-2 ] - ucalc[iend-3 ]) / 23.
ucalc[istart-1] = (1360.*ucalc[istart] + 720.*ucalc[istart+1] - 400.*ucalc[istart+2] + 80.*ucalc[istart+3]) / 1760.
ucalc[iend ] = (1360.*ucalc[iend-1] + 720.*ucalc[iend-2 ] - 400.*ucalc[iend-3 ] + 80.*ucalc[iend-4 ]) / 1760.
i = istart
laplu[i-igc] = (11.*ucalc[i-1] - 20.*ucalc[i] + 6.*ucalc[i+1] + 4.*ucalc[i+2] - ucalc[i+3]) / (12.*dx**2.)
for i in range(istart+1, iend-1):
laplu[i-igc] = (-1.*(ucalc[i-2]+ucalc[i+2]) + 16.*(ucalc[i-1]+ucalc[i+1]) - 30.*ucalc[i]) / (12.*dx**2.)
i = iend-1
laplu[i-igc] = (11.*ucalc[i+1] - 20.*ucalc[i] + 6.*ucalc[i-1] + 4.*ucalc[i-2] - ucalc[i-3]) / (12.*dx**2.)
lapluref = -4*pi**2./xsize**2. * cos(2.*pi*x/xsize)
erru = geterror(laplu, lapluref)
return laplu, lapluref, erru
x8, u8 = refdata(8)
dx8 = xsize / 8.
lapl8_2nd, laplref8_2nd, err8_2nd = laplx2nd(x8, u8)
lapl8_4th, laplref8_4th, err8_4th = laplx4th(x8, u8)
x16, u16 = refdata(16)
dx16 = xsize / 16.
lapl16_2nd, laplref16_2nd, err16_2nd = laplx2nd(x16, u16)
lapl16_4th, laplref16_4th, err16_4th = laplx4th(x16, u16)
x32, u32 = refdata(32)
dx32 = xsize / 32.
lapl32_2nd, laplref32_2nd, err32_2nd = laplx2nd(x32, u32)
lapl32_4th, laplref32_4th, err32_4th = laplx4th(x32, u32)
x64, u64 = refdata(64)
dx64 = xsize / 64.
lapl64_2nd, laplref64_2nd, err64_2nd = laplx2nd(x64, u64)
lapl64_4th, laplref64_4th, err64_4th = laplx4th(x64, u64)
dxs = array([dx8 , dx16 , dx32 , dx64])
errs_2nd = array([err8_2nd, err16_2nd, err32_2nd, err64_2nd])
errs_4th = array([err8_4th, err16_4th, err32_4th, err64_4th])
print('convergence 2nd', (log(errs_2nd[-1])-log(errs_2nd[0])) / (log(dxs[-1])-log(dxs[0])) )
print('convergence 4th', (log(errs_4th[-1])-log(errs_4th[0])) / (log(dxs[-1])-log(dxs[0])) )
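# Editor's note: the two prints above estimate the observed order of accuracy p as the slope of
# log(error) versus log(dx), i.e. p ~ (log(e_N) - log(e_0)) / (log(dx_N) - log(dx_0)); roughly 2 is
# expected for the 2nd-order stencil and up to 4 for the 4th-order one, with the one-sided boundary
# treatment presumably the reason a 3rd-order reference slope is also drawn in the plot below.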
off2 = 8.
off3 = 5.2
off4 = 2.
slope2 = off2*(dxs[:] / dxs[0])**2.
slope3 = off3*(dxs[:] / dxs[0])**3.
slope4 = off4*(dxs[:] / dxs[0])**4.
close('all')
figure()
plot(x8 , lapl8_2nd , 'b-' , label="8_2nd" )
plot(x16, lapl16_2nd, 'g-' , label="16_2nd")
plot(x32, lapl32_2nd, 'r-' , label="32_2nd")
plot(x8 , lapl8_4th , 'b--', label="8_4th" )
plot(x16, lapl16_4th, 'g--', label="16_4th")
plot(x32, lapl32_4th, 'r--', label="32_4th")
legend(loc=4, frameon=False)
figure()
loglog(dxs, errs_2nd, 'bo-', label="g2nd")
loglog(dxs, errs_4th, 'go-', label="g4th")
loglog(dxs, slope2, 'k--', label="2nd")
loglog(dxs, slope3, 'k-.', label="3rd")
loglog(dxs, slope4, 'k:' , label="4th")
legend(loc=4, frameon=False)
#xlim(0.01, 0.2)
| gpl-3.0 | -3,060,007,071,365,627,000 | 29.064 | 117 | 0.592602 | false |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/oauth2client/contrib/devshell.py | 8 | 4990 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utitilies for Google Developer Shell environment."""
import datetime
import json
import os
import socket
from oauth2client import _helpers
from oauth2client import client
DEVSHELL_ENV = 'DEVSHELL_CLIENT_PORT'
class Error(Exception):
"""Errors for this module."""
pass
class CommunicationError(Error):
"""Errors for communication with the Developer Shell server."""
class NoDevshellServer(Error):
"""Error when no Developer Shell server can be contacted."""
# The request for credential information to the Developer Shell client socket
# is always an empty PBLite-formatted JSON object, so just define it as a
# constant.
CREDENTIAL_INFO_REQUEST_JSON = '[]'
class CredentialInfoResponse(object):
"""Credential information response from Developer Shell server.
The credential information response from Developer Shell socket is a
PBLite-formatted JSON array with fields encoded by their index in the
array:
* Index 0 - user email
* Index 1 - default project ID. None if the project context is not known.
* Index 2 - OAuth2 access token. None if there is no valid auth context.
* Index 3 - Seconds until the access token expires. None if not present.
"""
def __init__(self, json_string):
"""Initialize the response data from JSON PBLite array."""
pbl = json.loads(json_string)
if not isinstance(pbl, list):
raise ValueError('Not a list: ' + str(pbl))
pbl_len = len(pbl)
self.user_email = pbl[0] if pbl_len > 0 else None
self.project_id = pbl[1] if pbl_len > 1 else None
self.access_token = pbl[2] if pbl_len > 2 else None
self.expires_in = pbl[3] if pbl_len > 3 else None
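# Editor's sketch (values are made up for illustration, not part of the library): the PBLite
# payload is plain JSON, so a response can be parsed directly from a string such as this one.
def _demo_credential_info_response():
    info = CredentialInfoResponse('["[email protected]", "example-project", "fake-token", 3600]')
    assert info.user_email == "[email protected]"
    assert info.project_id == "example-project"
    assert info.access_token == "fake-token"
    assert info.expires_in == 3600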
def _SendRecv():
"""Communicate with the Developer Shell server socket."""
port = int(os.getenv(DEVSHELL_ENV, 0))
if port == 0:
raise NoDevshellServer()
sock = socket.socket()
sock.connect(('localhost', port))
data = CREDENTIAL_INFO_REQUEST_JSON
msg = '{0}\n{1}'.format(len(data), data)
sock.sendall(_helpers._to_bytes(msg, encoding='utf-8'))
header = sock.recv(6).decode()
if '\n' not in header:
raise CommunicationError('saw no newline in the first 6 bytes')
len_str, json_str = header.split('\n', 1)
to_read = int(len_str) - len(json_str)
if to_read > 0:
json_str += sock.recv(to_read, socket.MSG_WAITALL).decode()
return CredentialInfoResponse(json_str)
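# Editor's note (framing inferred from the code above): both directions use a
# "<byte length>\n<payload>" frame, so the empty credential query '[]' is sent as "2\n[]",
# and _SendRecv reads the reply's length prefix before pulling in the remaining JSON bytes.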
class DevshellCredentials(client.GoogleCredentials):
"""Credentials object for Google Developer Shell environment.
This object will allow a Google Developer Shell session to identify its
user to Google and other OAuth 2.0 servers that can verify assertions. It
can be used for the purpose of accessing data stored under the user
account.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
"""
def __init__(self, user_agent=None):
super(DevshellCredentials, self).__init__(
None, # access_token, initialized below
None, # client_id
None, # client_secret
None, # refresh_token
None, # token_expiry
None, # token_uri
user_agent)
self._refresh(None)
def _refresh(self, http):
"""Refreshes the access token.
Args:
http: unused HTTP object
"""
self.devshell_response = _SendRecv()
self.access_token = self.devshell_response.access_token
expires_in = self.devshell_response.expires_in
if expires_in is not None:
delta = datetime.timedelta(seconds=expires_in)
self.token_expiry = client._UTCNOW() + delta
else:
self.token_expiry = None
@property
def user_email(self):
return self.devshell_response.user_email
@property
def project_id(self):
return self.devshell_response.project_id
@classmethod
def from_json(cls, json_data):
raise NotImplementedError(
'Cannot load Developer Shell credentials from JSON.')
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize Developer Shell credentials.')
| gpl-2.0 | 7,941,409,118,459,670,000 | 32.046358 | 77 | 0.670341 | false |
perkinslr/pypyjs | addedLibraries/nevow/test/test_context.py | 2 | 3856 | # -*- test-case-name: nevow.test.test_context -*-
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
import itertools
import time
import zope.interface as zi
from twisted.python.components import registerAdapter
from nevow import context
from nevow import tags
from nevow import inevow
from nevow.testutil import TestCase
class IStuff(zi.Interface): pass
class TestRememberLocate(TestCase):
def test_basic(self):
top = context.WovenContext()
middle = context.WovenContext(top, tags.invisible())
bottom = context.WovenContext(middle, tags.invisible())
top.remember(0, IStuff)
self.assertEquals(bottom.locate(IStuff), 0)
middle.remember(1, IStuff)
self.assertEquals(bottom.locate(IStuff), 1)
self.assertEquals(bottom.locate(IStuff, depth=2), 0)
def test_reverse(self):
top = context.WovenContext().remember(0, IStuff)
bottom = context.WovenContext(top, tags.invisible()).remember(1, IStuff)
self.assertEquals(bottom.locate(IStuff, depth=-1), 0)
def test_page(self):
page = context.PageContext(tag=1)
page.remember(1, inevow.IData)
ctx = context.WovenContext(page, tags.invisible())
self.assertEquals(ctx.locate(inevow.IData), 1)
self.assertEquals(ctx.locate(inevow.IData, depth=-1), 1)
def test_factoryContext(self):
ctx = TestContext()
self.assertEquals(IFoo(ctx), True)
def test_factoryContextFromLocate(self):
factory = TestContext()
ctx = context.WovenContext(parent=factory)
self.assertEquals(IFoo(ctx), True)
def test_factoryContextRemembers(self):
basectx = TestContext()
ctx = context.WovenContext(parent=basectx)
bar1 = IBar(ctx)
ctx = context.WovenContext(parent=basectx)
bar2 = IBar(ctx)
self.assertEqual(bar1, bar2)
def test_negativeLocate(self):
ctx = context.WovenContext()
self.assertRaises(KeyError, ctx.locate, IFoo)
self.assertRaises(TypeError, IFoo, ctx)
def test_negativeSomething(self):
factory = TestContext()
ctx = context.WovenContext(parent=factory)
self.assertRaises(KeyError, ctx.locate, inevow.IData)
def test_slots(self):
ctx = context.WovenContext()
ctx.fillSlots('foo', 'bar')
ctx = context.WovenContext(parent=ctx)
self.assertEquals(
ctx.locateSlotData('foo'),
'bar')
def test_negativeSlots(self):
ctx = context.WovenContext()
self.assertRaises(KeyError, ctx.locateSlotData, 'foo')
def benchmark_longContextChainArg(self):
from nevow import testutil
ctx = context.RequestContext(
tag=testutil.FakeRequest(args=dict(foo=["foo"], bar=["bar"])))
for x in range(5):
## Do some factory contexts
ctx = TestContext(parent=ctx)
for x in range(100):
## Do a bunch of crap
ctx = context.WovenContext(parent=ctx)
## Look for some request arguments
loops = 1e4
before = time.clock()
for x in xrange(loops):
ignored = ctx.arg('foo')
ignored = ctx.arg('bar')
after = time.clock()
self.recordStat({"arg/(cpu sec)": loops / (after - before)})
class TestContext(context.FactoryContext):
"""A target for registering adatpters.
"""
# IFoo interface/adapter that always adapts to True
class IFoo(zi.Interface):
"""A dummy interface.
"""
dummyAdapter = lambda x: True
registerAdapter(dummyAdapter, TestContext, IFoo)
# IBar interface that adapts to an incrementing value
class IBar(zi.Interface):
"""A dummy interface.
"""
nextBar = itertools.count()
def barFactory(ctx):
return nextBar.next()
registerAdapter(barFactory, TestContext, IBar)
| mit | -3,390,187,045,239,591,400 | 29.125 | 80 | 0.649637 | false |
jkkummerfeld/1ec-graph-parser | parser/nn-tagger/pre-process.py | 1 | 1342 | #!/usr/bin/env python3
import sys
import string
def map_token(token, pos):
# Lowercase
token = token.lower()
# Alternatives:
# - Just change the case of the first letter of the first word
# - Also, leave it as is if we've seen this capitalised elsewhere
# Replace numbers
letters = []
for letter in token:
if letter in string.digits:
if len(letters) > 0 and letters[-1] == '0':
continue
else:
letters.append("0")
else:
letters.append(letter)
token = ''.join(letters)
# Do the suffix trick?
return token
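# Editor's sketch (demo function is hypothetical, not part of the original script): map_token
# lowercases the token and collapses every run of digits into a single '0'.
def _demo_map_token():
    assert map_token("Hello", 0) == "hello"
    assert map_token("v1.2.3", 0) == "v0.0.0"
    assert map_token("2014", 0) == "0"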
for line in sys.stdin:
if line.strip().startswith("# SentID"):
print(line.strip())
continue
mapping = True
tokens = []
token_count = 0
for pos, token in enumerate(line.strip().split()):
if token == "|||" or (not mapping):
tokens.append(token)
mapping = False
else:
token_count += 1
tokens.append(map_token(token, pos))
# Add "_" tags if needed
assert len(tokens) <= token_count * 2 + 1
if len(tokens) < token_count * 2 + 1:
if mapping:
tokens.append("|||")
while len(tokens) < token_count * 2 + 1:
tokens.append("_;")
print(" ".join(tokens))
| isc | -6,227,678,315,729,307,000 | 24.320755 | 69 | 0.539493 | false |
Gaia3D/QGIS | python/plugins/db_manager/dlg_table_properties.py | 6 | 11437 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : Oct 13, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, SIGNAL
from PyQt4.QtGui import QDialog, QMessageBox, QApplication
from .db_plugins.data_model import TableFieldsModel, TableConstraintsModel, TableIndexesModel
from .db_plugins.plugin import BaseError
from .dlg_db_error import DlgDbError
from .dlg_field_properties import DlgFieldProperties
from .dlg_add_geometry_column import DlgAddGeometryColumn
from .dlg_create_constraint import DlgCreateConstraint
from .dlg_create_index import DlgCreateIndex
from .ui.ui_DlgTableProperties import Ui_DbManagerDlgTableProperties as Ui_Dialog
class DlgTableProperties(QDialog, Ui_Dialog):
def __init__(self, table, parent=None):
QDialog.__init__(self, parent)
self.table = table
self.setupUi(self)
self.db = self.table.database()
m = TableFieldsModel(self)
self.viewFields.setModel(m)
m = TableConstraintsModel(self)
self.viewConstraints.setModel(m)
m = TableIndexesModel(self)
self.viewIndexes.setModel(m)
self.connect(self.btnAddColumn, SIGNAL("clicked()"), self.addColumn)
self.connect(self.btnAddGeometryColumn, SIGNAL("clicked()"), self.addGeometryColumn)
self.connect(self.btnEditColumn, SIGNAL("clicked()"), self.editColumn)
self.connect(self.btnDeleteColumn, SIGNAL("clicked()"), self.deleteColumn)
self.connect(self.btnAddConstraint, SIGNAL("clicked()"), self.addConstraint)
self.connect(self.btnDeleteConstraint, SIGNAL("clicked()"), self.deleteConstraint)
self.connect(self.btnAddIndex, SIGNAL("clicked()"), self.createIndex)
self.connect(self.btnAddSpatialIndex, SIGNAL("clicked()"), self.createSpatialIndex)
self.connect(self.btnDeleteIndex, SIGNAL("clicked()"), self.deleteIndex)
self.populateViews()
self.checkSupports()
def checkSupports(self):
allowEditColumns = self.db.connector.hasTableColumnEditingSupport()
self.btnEditColumn.setEnabled(allowEditColumns)
self.btnDeleteColumn.setEnabled(allowEditColumns)
allowSpatial = self.db.connector.hasSpatialSupport()
self.btnAddGeometryColumn.setEnabled(allowSpatial)
self.btnAddSpatialIndex.setEnabled(allowSpatial)
def populateViews(self):
self.populateFields()
self.populateConstraints()
self.populateIndexes()
def populateFields(self):
""" load field information from database """
m = self.viewFields.model()
m.clear()
for fld in self.table.fields():
m.append(fld)
for col in range(4):
self.viewFields.resizeColumnToContents(col)
def currentColumn(self):
""" returns row index of selected column """
sel = self.viewFields.selectionModel()
indexes = sel.selectedRows()
if len(indexes) == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("nothing selected"))
return -1
return indexes[0].row()
def addColumn(self):
""" open dialog to set column info and add column to table """
dlg = DlgFieldProperties(self, None, self.table)
if not dlg.exec_():
return
fld = dlg.getField()
QApplication.setOverrideCursor(Qt.WaitCursor)
self.emit(SIGNAL("aboutToChangeTable()"))
try:
# add column to table
self.table.addField(fld)
self.populateViews()
except BaseError, e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
def addGeometryColumn(self):
""" open dialog to add geometry column """
dlg = DlgAddGeometryColumn(self, self.table)
if not dlg.exec_():
return
self.populateViews()
def editColumn(self):
""" open dialog to change column info and alter table appropriately """
index = self.currentColumn()
if index == -1:
return
m = self.viewFields.model()
# get column in table
# (there can be missing number if someone deleted a column)
fld = m.getObject(index)
dlg = DlgFieldProperties(self, fld, self.table)
if not dlg.exec_():
return
new_fld = dlg.getField(True)
QApplication.setOverrideCursor(Qt.WaitCursor)
self.emit(SIGNAL("aboutToChangeTable()"))
try:
fld.update(new_fld.name, new_fld.type2String(), new_fld.notNull, new_fld.default2String())
self.populateViews()
except BaseError, e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
def deleteColumn(self):
""" delete currently selected column """
index = self.currentColumn()
if index == -1:
return
m = self.viewFields.model()
fld = m.getObject(index)
res = QMessageBox.question(self, self.tr("Are you sure"), self.tr("really delete column '%s'?") % fld.name,
QMessageBox.Yes | QMessageBox.No)
if res != QMessageBox.Yes:
return
QApplication.setOverrideCursor(Qt.WaitCursor)
self.emit(SIGNAL("aboutToChangeTable()"))
try:
fld.delete()
self.populateViews()
except BaseError, e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
def populateConstraints(self):
constraints = self.table.constraints()
if constraints is None:
self.hideConstraints() # not supported
return
m = self.viewConstraints.model()
m.clear()
for constr in constraints:
m.append(constr)
for col in range(3):
self.viewConstraints.resizeColumnToContents(col)
def hideConstraints(self):
index = self.tabs.indexOf(self.tabConstraints)
if index >= 0:
self.tabs.setTabEnabled(index, False)
def addConstraint(self):
""" add primary key or unique constraint """
dlg = DlgCreateConstraint(self, self.table)
if not dlg.exec_():
return
self.populateViews()
def deleteConstraint(self):
""" delete a constraint """
index = self.currentConstraint()
if index == -1:
return
m = self.viewConstraints.model()
constr = m.getObject(index)
res = QMessageBox.question(self, self.tr("Are you sure"),
self.tr("really delete constraint '%s'?") % constr.name,
QMessageBox.Yes | QMessageBox.No)
if res != QMessageBox.Yes:
return
QApplication.setOverrideCursor(Qt.WaitCursor)
self.emit(SIGNAL("aboutToChangeTable()"))
try:
constr.delete()
self.populateViews()
except BaseError, e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
def currentConstraint(self):
""" returns row index of selected index """
sel = self.viewConstraints.selectionModel()
indexes = sel.selectedRows()
if len(indexes) == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("nothing selected"))
return -1
return indexes[0].row()
def populateIndexes(self):
indexes = self.table.indexes()
if indexes is None:
self.hideIndexes()
return
m = self.viewIndexes.model()
m.clear()
for idx in indexes:
m.append(idx)
for col in range(2):
self.viewIndexes.resizeColumnToContents(col)
def hideIndexes(self):
index = self.tabs.indexOf(self.tabIndexes)
if index >= 0:
self.tabs.setTabEnabled(index, False)
def createIndex(self):
""" create an index """
dlg = DlgCreateIndex(self, self.table)
if not dlg.exec_():
return
self.populateViews()
def createSpatialIndex(self):
""" create spatial index for the geometry column """
if self.table.type != self.table.VectorType:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("The selected table has no geometry"))
return
res = QMessageBox.question(self, self.tr("Create?"),
self.tr("Create spatial index for field %s?") % self.table.geomColumn,
QMessageBox.Yes | QMessageBox.No)
if res != QMessageBox.Yes:
return
# TODO: first check whether the index doesn't exist already
QApplication.setOverrideCursor(Qt.WaitCursor)
self.emit(SIGNAL("aboutToChangeTable()"))
try:
self.table.createSpatialIndex()
self.populateViews()
except BaseError, e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
def currentIndex(self):
""" returns row index of selected index """
sel = self.viewIndexes.selectionModel()
indexes = sel.selectedRows()
if len(indexes) == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("Nothing selected"))
return -1
return indexes[0].row()
def deleteIndex(self):
""" delete currently selected index """
index = self.currentIndex()
if index == -1:
return
m = self.viewIndexes.model()
idx = m.getObject(index)
res = QMessageBox.question(self, self.tr("Are you sure"), self.tr("really delete index '%s'?") % idx.name,
QMessageBox.Yes | QMessageBox.No)
if res != QMessageBox.Yes:
return
QApplication.setOverrideCursor(Qt.WaitCursor)
self.emit(SIGNAL("aboutToChangeTable()"))
try:
idx.delete()
self.populateViews()
except BaseError, e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
| gpl-2.0 | 8,228,031,055,116,039,000 | 32.837278 | 115 | 0.577774 | false |
cd34/apex | examples/apex_example/setup.py | 2 | 1066 | import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = [
'pyramid',
'SQLAlchemy',
'transaction',
'repoze.tm2>=1.0b1', # default_commit_veto
'zope.sqlalchemy',
'WebError',
'apex',
]
if sys.version_info[:3] < (2,5,0):
requires.append('pysqlite')
setup(name='apex_example',
version='0.0',
description='apex_example',
long_description="",
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='apex_example',
install_requires = requires,
entry_points = """\
[paste.app_factory]
main = apex_example:main
""",
paster_plugins=['pyramid'],
)
| mit | 2,763,441,838,803,290,600 | 22.173913 | 63 | 0.569418 | false |
glenndmello/luigi | examples/top_artists.py | 4 | 9103 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from collections import defaultdict
from heapq import nlargest
from luigi import six
import luigi
import luigi.hadoop
import luigi.hdfs
import luigi.postgres
class ExternalStreams(luigi.ExternalTask):
"""
Example of a possible external data dump
To depend on external targets (typically at the top of your dependency graph), you can define
an ExternalTask like this.
"""
date = luigi.DateParameter()
def output(self):
"""
Returns the target output for this task.
In this case, it expects a file to be present in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.hdfs.HdfsTarget(self.date.strftime('data/streams_%Y-%m-%d.tsv'))
class Streams(luigi.Task):
"""
Faked version right now, just generates bogus data.
"""
date = luigi.DateParameter()
def run(self):
"""
Generates bogus data and writes it into the :py:meth:`~.Streams.output` target.
"""
with self.output().open('w') as output:
for _ in range(1000):
output.write('{} {} {}\n'.format(
random.randint(0, 999),
random.randint(0, 999),
random.randint(0, 999)))
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in the local file system.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget(self.date.strftime('data/streams_%Y_%m_%d_faked.tsv'))
class StreamsHdfs(Streams):
"""
This task performs the same work as :py:class:`~.Streams` but its output is written to HDFS.
This class uses :py:meth:`~.Streams.run` and
overrides :py:meth:`~.Streams.output` so redefine HDFS as its target.
"""
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.hdfs.HdfsTarget(self.date.strftime('data/streams_%Y_%m_%d_faked.tsv'))
class AggregateArtists(luigi.Task):
"""
This task runs over the target data returned by :py:meth:`~/.Streams.output` and
writes the result into its :py:meth:`~.AggregateArtists.output` target (local file).
"""
date_interval = luigi.DateIntervalParameter()
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget("data/artist_streams_{}.tsv".format(self.date_interval))
def requires(self):
"""
This task's dependencies:
* :py:class:`~.Streams`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return [Streams(date) for date in self.date_interval]
def run(self):
artist_count = defaultdict(int)
for t in self.input():
with t.open('r') as in_file:
for line in in_file:
_, artist, track = line.strip().split()
artist_count[artist] += 1
with self.output().open('w') as out_file:
for artist, count in six.iteritems(artist_count):
out_file.write('{}\t{}\n'.format(artist, count))
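# Editor's note (usage sketch; module path and date are illustrative): a task like the one above
# is normally launched from the command line, e.g.
#   python top_artists.py AggregateArtists --local-scheduler --date-interval 2012-06
# luigi then schedules the Streams dependency for every date in the interval before run() executes.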
class AggregateArtistsHadoop(luigi.hadoop.JobTask):
"""
This task runs a :py:class:`luigi.hadoop.JobTask` task
over each target data returned by :py:meth:`~/.StreamsHdfs.output` and
writes the result into its :py:meth:`~.AggregateArtistsHadoop.output` target (a file in HDFS).
This class uses :py:meth:`luigi.hadoop.JobTask.run`.
"""
date_interval = luigi.DateIntervalParameter()
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.hdfs.HdfsTarget(
"data/artist_streams_%s.tsv" % self.date_interval,
format=luigi.hdfs.PlainDir
)
def requires(self):
"""
This task's dependencies:
* :py:class:`~.StreamsHdfs`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return [StreamsHdfs(date) for date in self.date_interval]
def mapper(self, line):
"""
The implementation of the map phase of the Hadoop job.
:param line: the input.
:return: yields a (key, value) tuple; in this case, (artist, 1) for each stream
"""
_, artist, _ = line.strip().split()
yield artist, 1
def reducer(self, key, values):
"""
The implementation of the reducer phase of the Hadoop job.
:param key: the artist.
:param values: the stream count.
:return: tuple (artist, count of streams)
"""
yield key, sum(values)
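# Editor's note (worked example, not part of the original job): with an input line such as
# "17 42 7" (<user> <artist> <track>), mapper() yields ("42", 1) once per stream, and
# reducer("42", [1, 1, 1]) yields ("42", 3) -- a plain word count over artists.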
class Top10Artists(luigi.Task):
"""
This task runs over the target data returned by :py:meth:`~/.AggregateArtists.output` or
:py:meth:`~/.AggregateArtistsHadoop.output` in case :py:attr:`~/.Top10Artists.use_hadoop` is set and
writes the result into its :py:meth:`~.Top10Artists.output` target (a file in local filesystem).
"""
date_interval = luigi.DateIntervalParameter()
use_hadoop = luigi.BoolParameter()
def requires(self):
"""
This task's dependencies:
* :py:class:`~.AggregateArtists` or
* :py:class:`~.AggregateArtistsHadoop` if :py:attr:`~/.Top10Artists.use_hadoop` is set.
:return: object (:py:class:`luigi.task.Task`)
"""
if self.use_hadoop:
return AggregateArtistsHadoop(self.date_interval)
else:
return AggregateArtists(self.date_interval)
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget("data/top_artists_%s.tsv" % self.date_interval)
def run(self):
top_10 = nlargest(10, self._input_iterator())
with self.output().open('w') as out_file:
for streams, artist in top_10:
out_line = '\t'.join([
str(self.date_interval.date_a),
str(self.date_interval.date_b),
artist,
str(streams)
])
out_file.write((out_line + '\n'))
def _input_iterator(self):
with self.input().open('r') as in_file:
for line in in_file:
artist, streams = line.strip().split()
yield int(streams), artist
class ArtistToplistToDatabase(luigi.postgres.CopyToTable):
"""
This task runs a :py:class:`luigi.postgres.CopyToTable` task
over the target data returned by :py:meth:`~/.Top10Artists.output` and
writes the result into its :py:meth:`~.ArtistToplistToDatabase.output` target which,
by default, is :py:class:`luigi.postgres.PostgresTarget` (a table in PostgreSQL).
This class uses :py:meth:`luigi.postgres.CopyToTable.run` and :py:meth:`luigi.postgres.CopyToTable.output`.
"""
date_interval = luigi.DateIntervalParameter()
use_hadoop = luigi.BoolParameter()
host = "localhost"
database = "toplists"
user = "luigi"
password = "abc123" # ;)
table = "top10"
columns = [("date_from", "DATE"),
("date_to", "DATE"),
("artist", "TEXT"),
("streams", "INT")]
def requires(self):
"""
This task's dependencies:
* :py:class:`~.Top10Artists`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return Top10Artists(self.date_interval, self.use_hadoop)
if __name__ == "__main__":
luigi.run()
| apache-2.0 | 600,442,336,424,532,200 | 31.280142 | 111 | 0.610568 | false |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-administration/tests/blob_container_preparer.py | 1 | 1852 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from datetime import datetime, timedelta
from azure.storage.blob import BlobServiceClient, generate_account_sas, ResourceTypes, AccountSasPermissions
from devtools_testutils import AzureMgmtPreparer
class BlobContainerPreparer(AzureMgmtPreparer):
def __init__(self, **kwargs):
super(BlobContainerPreparer, self).__init__("container", 24, random_name_enabled=True, **kwargs)
def create_resource(self, name, **kwargs):
if self.is_live:
storage_account = kwargs.pop("storage_account")
storage_account_key = kwargs.pop("storage_account_key")
sas_token = generate_account_sas(
account_name=storage_account.name,
account_key=storage_account_key,
resource_types=ResourceTypes(container=True, object=True),
permission=AccountSasPermissions(
create=True, list=True, write=True, read=True, add=True, delete=True, delete_previous_version=True
),
expiry=datetime.utcnow() + timedelta(minutes=30),
)
blob_client = BlobServiceClient(storage_account.primary_endpoints.blob, sas_token)
container = blob_client.create_container(name)
container_uri = storage_account.primary_endpoints.blob + container.container_name
self.test_class_instance.scrubber.register_name_pair(sas_token, "redacted")
self.test_class_instance.scrubber.register_name_pair(container_uri, "https://storage/container")
else:
sas_token = "fake-sas"
container_uri = "https://storage/container"
return {"container_uri": container_uri, "sas_token": sas_token}
| mit | -8,463,971,972,934,652,000 | 50.444444 | 118 | 0.633909 | false |
andersonsilvade/python_C | Python32/web2py/applications/examples/controllers/cache_examples.py | 2 | 1542 | import time
def cache_in_ram():
"""cache the output of the lambda function in ram"""
t = cache.ram('time', lambda: time.ctime(), time_expire=5)
return dict(time=t, link=A('click to reload', _href=URL(r=request)))
def cache_on_disk():
"""cache the output of the lambda function on disk"""
t = cache.disk('time', lambda: time.ctime(), time_expire=5)
return dict(time=t, link=A('click to reload', _href=URL(r=request)))
def cache_in_ram_and_disk():
"""cache the output of the lambda function on disk and in ram"""
t = cache.ram('time', lambda: cache.disk('time', lambda:
time.ctime(), time_expire=5), time_expire=5)
return dict(time=t, link=A('click to reload', _href=URL(r=request)))
@cache(request.env.path_info, time_expire=5, cache_model=cache.ram)
def cache_controller_in_ram():
"""cache the output of the controller in ram"""
t = time.ctime()
return dict(time=t, link=A('click to reload', _href=URL(r=request)))
@cache(request.env.path_info, time_expire=5, cache_model=cache.disk)
def cache_controller_on_disk():
"""cache the output of the controller on disk"""
t = time.ctime()
return dict(time=t, link=A('click to reload', _href=URL(r=request)))
@cache(request.env.path_info, time_expire=5, cache_model=cache.ram)
def cache_controller_and_view():
"""cache the output of the controller rendered by the view in ram"""
t = time.ctime()
d = dict(time=t, link=A('click to reload', _href=URL(r=request)))
return response.render(d)
| mit | 3,043,570,753,729,817,000 | 31.125 | 72 | 0.656291 | false |
lukeroge/CloudBot | plugins/cypher.py | 21 | 1815 | """
cypher.py
Ciphers and deciphers strings.
Created By:
- Tom <https://github.com/instanceoftom>
Modified By:
- Fletcher Boyd <https://github.com/thenoodle68>
- Dabo Ross <https://github.com/daboross>
- Luke Rogers <https://github.com/lukeroge>
License:
GPL v3
"""
import base64
import binascii
from cloudbot import hook
def encode(password, text):
"""
:type password: str
:type text: str
"""
enc = []
for i in range(len(text)):
key_c = password[i % len(password)]
enc_c = chr((ord(text[i]) + ord(key_c)) % 256)
enc.append(enc_c)
return base64.urlsafe_b64encode("".join(enc).encode()).decode()
def decode(password, encoded, notice):
"""
:type password: str
:type encoded: str
"""
dec = []
try:
encoded_bytes = base64.urlsafe_b64decode(encoded.encode()).decode()
except binascii.Error:
notice("Invalid input '{}'".format(encoded))
return
for i in range(len(encoded_bytes)):
key_c = password[i % len(password)]
dec_c = chr((256 + ord(encoded_bytes[i]) - ord(key_c)) % 256)
dec.append(dec_c)
return "".join(dec)
@hook.command("cypher", "cipher")
def cypher(text, notice):
"""<pass> <string> -- cyphers <string> with <password>"""
split = text.split(None, 1)
if len(split) < 2:
notice(cypher.__doc__)
return
password = split[0]
plaintext = split[1]
return encode(password, plaintext)
@hook.command("decypher", "decipher")
def decypher(text, notice):
"""<pass> <string> - decyphers <string> with <password>"""
split = text.split(None, 1)
if len(split) < 2:
notice(decypher.__doc__)
return
password = split[0]
encoded = split[1]
return decode(password, encoded, notice)
| gpl-3.0 | -4,556,028,604,208,984,000 | 22.881579 | 75 | 0.6 | false |
praveendareddy21/my-repo | tree_de.py | 1 | 1193 | class nod(object):
def __init__(self ,elem):
self.elem=elem
self.le=None
self.ri=None
def setle(self ,le):
self.le=le
def getle(self):
return self.le
def setri(self ,ri):
self.ri=ri
def getri(self):
return self.ri
class dll(object):
def __init__(self,elem):
self.elem=elem
self.le=None
self.ri=None
def setle(self ,le):
self.le=le
def getle(self):
return self.le
def setri(self ,ri):
self.ri=ri
def getri(self):
return self.ri
s=nod(12)
b=nod(7)
c=nod(14)
s.setle(b)
s.setri(c)
h=nod(9)
b.setri(h)
l=nod(13)
m=nod(16)
c.setle(l)
c.setri(m)
#print s.getle().elem
print s.__dict__
def trav(obj ,d):
if obj.le is not None :
trav(obj.getle(),d)
print obj.elem
if obj.ri is not None :
trav(obj.getri(),d)
else :
t=d
while t.ri is not None:
t=t.getri()
t.ri=obj
obj.ri=None
obj.le=t
d=dll('root')
trav(s ,d)
t=d
while t.ri is not None:
print t.elem
t=t.getri()
print t.elem
while t.le is not None :
print t.elem
t=t.getle()
| mit | 598,142,379,606,190,200 | 17.353846 | 31 | 0.524728 | false |
beni55/edx-platform | lms/djangoapps/django_comment_client/tests/utils.py | 15 | 5494 | """
Utilities for tests within the django_comment_client module.
"""
from datetime import datetime
from mock import patch
from pytz import UTC
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from django_comment_common.models import Role
from django_comment_common.utils import seed_permissions_roles
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.partitions.partitions import UserPartition, Group
class CohortedTestCase(ModuleStoreTestCase):
"""
Sets up a course with a student, a moderator and their cohorts.
"""
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CohortedTestCase, self).setUp()
self.course = CourseFactory.create(
cohort_config={
"cohorted": True,
"cohorted_discussions": ["cohorted_topic"]
}
)
self.student_cohort = CohortFactory.create(
name="student_cohort",
course_id=self.course.id
)
self.moderator_cohort = CohortFactory.create(
name="moderator_cohort",
course_id=self.course.id
)
self.course.discussion_topics["cohorted topic"] = {"id": "cohorted_topic"}
self.course.discussion_topics["non-cohorted topic"] = {"id": "non_cohorted_topic"}
self.store.update_item(self.course, self.user.id)
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
self.moderator = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
self.student_cohort.users.add(self.student)
self.moderator_cohort.users.add(self.moderator)
class ContentGroupTestCase(ModuleStoreTestCase):
"""
Sets up discussion modules visible to content groups 'Alpha' and
'Beta', as well as a module visible to all students. Creates a
staff user, users with access to Alpha/Beta (by way of cohorts),
and a non-cohorted user with no special access.
"""
def setUp(self):
super(ContentGroupTestCase, self).setUp()
self.course = CourseFactory.create(
org='org', number='number', run='run',
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime(2012, 2, 3, tzinfo=UTC),
user_partitions=[
UserPartition(
0,
'Content Group Configuration',
'',
[Group(1, 'Alpha'), Group(2, 'Beta')],
scheme_id='cohort'
)
],
cohort_config={'cohorted': True},
discussion_topics={}
)
self.staff_user = UserFactory.create(is_staff=True)
self.alpha_user = UserFactory.create()
self.beta_user = UserFactory.create()
self.non_cohorted_user = UserFactory.create()
for user in [self.staff_user, self.alpha_user, self.beta_user, self.non_cohorted_user]:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
alpha_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Alpha',
users=[self.alpha_user]
)
beta_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Beta',
users=[self.beta_user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=alpha_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[0].id
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=beta_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[1].id
)
self.alpha_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='alpha_group_discussion',
discussion_target='Visible to Alpha',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
)
self.beta_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='beta_group_discussion',
discussion_target='Visible to Beta',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
)
self.global_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='global_group_discussion',
discussion_target='Visible to Everyone'
)
self.course = self.store.get_item(self.course.location)
| agpl-3.0 | 8,448,469,694,349,997,000 | 41.261538 | 107 | 0.634874 | false |
enen92/openxenmanager | src/OXM/window_menuitem.py | 1 | 78142 | # -----------------------------------------------------------------------
# OpenXenManager
#
# Copyright (C) 2009 Alberto Gonzalez Rodriguez [email protected]
# Copyright (C) 2014 Daniel Lintott <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -----------------------------------------------------------------------
from oxcSERVER import *
from window_addserver import AddServer
import xtea
from version import __version__
from os import path
import utils
class oxcWindowMenuItem:
"""
Class used to manage functions called from menuitems
"""
# HOST/SERVER
def on_m_repair_storage_activate(self, widget, data=None):
"""
Function called on "Repair storage"
"""
self.builder.get_object("cancelrepairstorage").set_label("Cancel")
self.builder.get_object("lblrepairerror").hide()
self.builder.get_object("repairstorage").show()
listrepairstorage = self.builder.get_object("listrepairstorage")
self.xc_servers[self.selected_host].fill_listrepairstorage(listrepairstorage, self.selected_ref)
def on_cancelrepairstorage_clicked(self, widget, data=None):
"""
Function called when you press cancel on "repair storage" window
"""
self.builder.get_object("repairstorage").hide()
def on_acceptrepairstorage_clicked(self, widget, data=None):
"""
Function called when you press Repair on "repair storage" window
"""
self.builder.get_object("lblrepairerror").show()
self.builder.get_object("lblrepairerror").set_markup(
"<span foreground='green'><b>Repairing... wait please.</b></span>")
listrepairstorage = self.builder.get_object("listrepairstorage")
Thread(target=self.xc_servers[self.selected_host].repair_storage,
args=(listrepairstorage, self.selected_ref)).start()
self.builder.get_object("acceptrepairstorage").set_sensitive(False)
def on_m_remove_activate(self, widget, data=None):
"""
Called from "remove" menuitem of server
"""
# Remove server from configuration
del self.config_hosts[self.selected_name]
self.config['servers']['hosts'] = self.config_hosts
self.config.write()
# Remove from left treeview (treestore)
self.treestore.remove(self.selected_iter)
def on_m_forget_activate(self, widget, data=None):
"""
Forget password: don't remember password for server
"""
# Just blank out the stored password ("") for this server in oxc.conf
if self.selected_name in self.config_hosts:
self.config_hosts[self.selected_name][1] = ""
elif self.selected_ip in self.config_hosts:
self.config_hosts[self.selected_ip][1] = ""
elif self.selected_host in self.config_hosts:
self.config_hosts[self.selected_host][1] = ""
def on_m_addserver_activate(self, widget, data=None):
"""
Add server: show the window for add a new server
"""
#self.builder.get_object("addserver").show()
add_server = AddServer(self)
add_server.show_dialog()
# VM
# Make Into Template
def on_m_make_into_template_activate(self, widget, data=None):
"""
Called from "make into template" menuitem of VM
Call to method "make_into_template" of oxcSERVER with selected ref param (vm ref)
"""
self.xc_servers[self.selected_host].make_into_template(self.selected_ref)
# Copy VM
def on_m_snapshot_activate(self, widget, data=None):
"""
Called from "snapshot" menuitem of VM
Show snapshot dialog and set the name to empty
"""
self.builder.get_object("snapshotname").set_text("")
self.builder.get_object("dialogsnapshotname").show()
def on_m_copy_activate(self, widget, data=None):
"""
Called from "copy" menuitem of VM
"""
listcopystg = self.builder.get_object("listcopystg")
treecopystg = self.builder.get_object("treecopystg")
# Set name and description on copy window
self.builder.get_object("txtcopyvmname").set_text("Copy of " + self.selected_name)
self.builder.get_object("txtcopyvmdesc").set_text(
self.xc_servers[self.selected_host].all['vms'][self.selected_ref]['name_description'])
"""
        Fill the treeview "treecopystg" (model "listcopystg") with the possible storage repositories
        This treeview is only used on "full copy"
        fill_listcopystg returns the index of the default storage
"""
defsr = self.xc_servers[self.selected_host].fill_listcopystg(listcopystg, self.selected_host)
# Select the default storage
treecopystg.set_cursor((defsr,), treecopystg.get_column(0))
treecopystg.get_selection().select_path((defsr, 0))
# Show the window copy window
self.builder.get_object("windowcopyvm").show()
def on_cancelforcejoinpool_clicked(self, widget, data=None):
"""
Cancel "force join to pool" dialog
"""
self.builder.get_object("forcejoinpool").hide()
def on_acceptforcejoinpool_clicked(self, widget, data=None):
"""
Accept "force join to pool" dialog
"""
last_pool_data = self.xc_servers[self.last_host_pool].last_pool_data
self.xc_servers[self.last_host_pool].add_server_to_pool_force(self.selected_ref, last_pool_data)
self.builder.get_object("forcejoinpool").hide()
def on_m_pool_add_server_activate(self, widget, data=None):
"""
Called from "Add Server" right menu (pool)
"""
for i in range(2, len(self.builder.get_object("menu_m_add_server").get_children())):
self.builder.get_object("menu_m_add_server").remove(
self.builder.get_object("menu_m_add_server").get_children()[2])
for server in self.xc_servers:
if self.xc_servers[server].is_connected:
pool_ref = self.xc_servers[server].all['pool'].keys()[0]
if self.xc_servers[server].all['pool'][pool_ref]["name_label"] == "":
image = gtk.Image()
image.set_from_file(path.join(utils.module_path(), "images/tree_running_16.png"))
item = gtk.ImageMenuItem(gtk.STOCK_HELP, None)
item.use_underline = False
item.set_image(image)
# Host ref
ref = self.xc_servers[server].all['host'].keys()[0]
self.builder.get_object("menu_m_add_server").append(item)
item.connect("activate", self.xc_servers[server].add_server_to_pool, ref, server, ref,
self.selected_ip)
item.get_children()[0].set_label(self.xc_servers[server].all['host'][ref]["name_label"])
item.show()
def on_m_add_to_pool_activate(self, widget, data=None):
"""
Called from "Add To pool" menuitem (server)
"""
for i in range(2, len(self.builder.get_object("menu_add_to_pool").get_children())):
self.builder.get_object("menu_add_to_pool").remove(
self.builder.get_object("menu_add_to_pool").get_children()[2])
for server in self.xc_servers:
if self.xc_servers[server].is_connected:
pool_ref = self.xc_servers[server].all['pool'].keys()[0]
if self.xc_servers[server].all['pool'][pool_ref]["name_label"] != "":
image = gtk.Image()
image.set_from_file(path.join(utils.module_path(), "images/poolconnected_16.png"))
item = gtk.ImageMenuItem(gtk.STOCK_HELP, None)
item.use_underline = False
item.set_image(image)
# Host ref
pool = self.xc_servers[server].all['pool'][pool_ref]["name_label"]
self.builder.get_object("menu_add_to_pool").append(item)
item.connect("activate", self.xc_servers[self.selected_ip].add_server_to_pool, pool_ref,
self.selected_ip, self.selected_ref, server)
item.get_children()[0].set_label(pool)
item.show()
def on_menuitem_pool_add_server_activate(self, widget, data=None):
"""
Called from "Add Server" menuitem (pool)
"""
for i in range(2, len(self.builder.get_object("menu_add_server").get_children())):
self.builder.get_object("menu_add_server").remove(
self.builder.get_object("menu_add_server").get_children()[2])
for server in self.xc_servers:
if self.xc_servers[server].is_connected:
pool_ref = self.xc_servers[server].all['pool'].keys()[0]
if self.xc_servers[server].all['pool'][pool_ref]["name_label"] == "":
image = gtk.Image()
image.set_from_file(path.join(utils.module_path(), "images/tree_running_16.png"))
item = gtk.ImageMenuItem(gtk.STOCK_HELP, None)
item.use_underline = False
item.set_image(image)
# Host ref
ref = self.xc_servers[server].all['host'].keys()[0]
self.builder.get_object("menu_add_server").append(item)
item.connect("activate", self.xc_servers[server].add_server_to_pool, ref, server, ref,
self.selected_ip)
item.get_children()[0].set_label(self.xc_servers[server].all['host'][ref]["name_label"])
item.show()
def on_menuitem_server_add_to_pool_activate(self, widget, data=None):
"""
Called from "Add to pool" menuitem (server)
"""
for i in range(2, len(self.builder.get_object("menu_server_add_to_pool").get_children())):
self.builder.get_object("menu_server_add_to_pool").remove(
self.builder.get_object("menu_server_add_to_pool").get_children()[2])
for server in self.xc_servers:
if self.xc_servers[server].is_connected:
pool_ref = self.xc_servers[server].all['pool'].keys()[0]
if self.xc_servers[server].all['pool'][pool_ref]["name_label"] != "":
image = gtk.Image()
image.set_from_file(path.join(utils.module_path(), "images/poolconnected_16.png"))
item = gtk.ImageMenuItem(gtk.STOCK_HELP, None)
item.use_underline = False
item.set_image(image)
# Host ref
pool = self.xc_servers[server].all['pool'][pool_ref]["name_label"]
self.builder.get_object("menu_server_add_to_pool").append(item)
item.connect("activate", self.xc_servers[self.selected_ip].add_server_to_pool, pool_ref,
self.selected_ip, self.selected_ref, server)
item.get_children()[0].set_label(pool)
item.show()
def on_m_resume_on_activate(self, widget, data=None):
"""
Called from "Resumen on" menuitem of VM
"""
# Remove the previous possible servers of submenu (right menu)
for i in range(2, len(self.builder.get_object("menu_resume_on").get_children())):
self.builder.get_object("menu_resume_on").remove(
self.builder.get_object("menu_resume_on").get_children()[2])
        # Go through all servers and add them to the submenu (right menu)
for h in self.xc_servers[self.selected_host].all['host']:
image = gtk.Image()
image.set_from_file(path.join(utils.module_path(), "images/xen.gif"))
item = gtk.ImageMenuItem(gtk.STOCK_HELP, None)
item.use_underline = False
item.set_image(image)
"""
            Connect the signal: when the item is clicked, call "resume_vm_on" with params:
            - Selected vm ref
            - Host ref
"""
item.connect("activate", self.xc_servers[self.selected_host].resume_vm_on, self.selected_ref, h)
self.builder.get_object("menu_resume_on").append(item)
host_name = self.xc_servers[self.selected_host].all['host'][h]['name_label']
"""
            The can_start function returns:
            - An empty string: the vm can start on that server
            - A non-empty string: the vm cannot start on that server (not enough memory or another error)
"""
can_start = self.xc_servers[self.selected_host].can_start(self.selected_ref, h)
if can_start:
item.get_children()[0].set_label(host_name + " : " + can_start)
else:
item.get_children()[0].set_label(host_name)
item.show()
# If server cannot be used to resume on it, disable server
if can_start != "":
item.set_sensitive(False)
def on_m_start_on_activate(self, widget, data=None):
"""
Called from "Start on" menuitem of VM
"""
# Remove the previous possible servers of submenu (right menu)
for i in range(2, len(self.builder.get_object("menu_start_on").get_children())):
self.builder.get_object("menu_start_on").remove(self.builder.get_object("menu_start_on").get_children()[2])
        # Go through all servers and add them to the submenu (right menu)
for h in self.xc_servers[self.selected_host].all['host']:
image = gtk.Image()
image.set_from_file(path.join(utils.module_path(), "images/xen.gif"))
item = gtk.ImageMenuItem(gtk.STOCK_HELP, None)
item.use_underline = False
item.set_image(image)
"""
            Connect the signal: when the item is clicked, call "start_vm_on" with params:
            - Selected vm ref
            - Host ref
"""
item.connect("activate", self.xc_servers[self.selected_host].start_vm_on, self.selected_ref, h)
self.builder.get_object("menu_start_on").append(item)
host_name = self.xc_servers[self.selected_host].all['host'][h]['name_label']
"""
            The can_start function returns:
            - An empty string: the vm can start on that server
            - A non-empty string: the vm cannot start on that server (not enough memory or another error)
"""
can_start = self.xc_servers[self.selected_host].can_start(self.selected_ref, h)
if can_start:
item.get_children()[0].set_label(host_name + " : " + can_start)
else:
item.get_children()[0].set_label(host_name)
item.show()
            # If server cannot be used to start the vm on it, disable it
if can_start != "":
item.set_sensitive(False)
def on_m_pool_migrate_activate(self, widget, data=None):
"""
Called from "Start on" menuitem of VM
"""
# Remove the previous possible servers of submenu (right menu)
for i in range(2, len(self.builder.get_object("menu_pool_migrate").get_children())):
self.builder.get_object("menu_pool_migrate").remove(
self.builder.get_object("menu_pool_migrate").get_children()[2])
        # Go through all servers and add them to the submenu (right menu)
for h in self.xc_servers[self.selected_host].all['host']:
image = gtk.Image()
image.set_from_file(path.join(utils.module_path(), "images/xen.gif"))
item = gtk.ImageMenuItem(gtk.STOCK_HELP, None)
item.use_underline = False
item.set_image(image)
"""
            Connect the signal: when the item is clicked, call "migrate_vm" with params:
            - Selected vm ref
            - Host ref
"""
item.connect("activate", self.xc_servers[self.selected_host].migrate_vm, self.selected_ref, h)
self.builder.get_object("menu_pool_migrate").append(item)
host_name = self.xc_servers[self.selected_host].all['host'][h]['name_label']
resident_on = self.xc_servers[self.selected_host].all['vms'][self.selected_ref]['resident_on']
"""
            The can_start function returns:
            - An empty string: the vm can start on that server
            - A non-empty string: the vm cannot start on that server (not enough memory or another error)
"""
can_start = self.xc_servers[self.selected_host].can_start(self.selected_ref, h)
if can_start:
item.get_children()[0].set_label(host_name + " : " + can_start)
else:
item.get_children()[0].set_label(host_name)
item.show()
            # If the server cannot host the vm, or the vm already resides on it, disable it
if can_start != "" or h == resident_on:
item.set_sensitive(False)
#TOOLBAR
def on_tb_addserver_clicked(self, widget):
add_server = AddServer(self)
add_server.show_dialog()
def on_tb_start_clicked(self, widget, data=None):
"""
"Start" button on toolbar is pressed
Power on a VM
"""
self.xc_servers[self.selected_host].start_vm(self.selected_ref)
def on_tb_clean_shutdown_clicked(self, widget, data=None):
"""
"Clean shutdown" on toolbar is pressed
Clean shutdown a vm
"""
self.xc_servers[self.selected_host].clean_shutdown_vm(self.selected_ref)
def on_tb_hard_shutdown_clicked(self, widget, data=None):
"""
"Hard shutdown" on toolbar is pressed
Hard shutdown a vm
"""
self.xc_servers[self.selected_host].hard_shutdown_vm(self.selected_ref)
def on_tb_clean_reboot_clicked(self, widget, data=None):
"""
"Clean reboot" on toolbar is pressed
Clean reboot a vm
"""
self.xc_servers[self.selected_host].clean_reboot_vm(self.selected_ref)
def on_tb_hard_reboot_clicked(self, widget, data=None):
"""
"Hard reboot" on toolbar is pressed
hard reboot a vm
"""
self.xc_servers[self.selected_host].hard_reboot_vm(self.selected_ref)
def on_tb_suspend_clicked(self, widget, data=None):
"""
"Suspend" on toolbar is pressed
Suspend a vm
"""
self.xc_servers[self.selected_host].suspend_vm(self.selected_ref)
def on_tb_unpause_clicked(self, widget, data=None):
"""
"Resumen" on toolbar is pressed
Resume a suspended vm
"""
self.xc_servers[self.selected_host].unpause_vm(self.selected_ref)
def on_tbalerts_clicked(self, widget, data=None):
"""
Open the alert window
"""
self.builder.get_object("windowalerts").show()
def update_toolbar(self):
"""
        This function is called when a VM, host, storage or template is selected
        Toolbar buttons are named tb_<action>, e.g. tb_start;
        check whether "<action>" (the name with tb_ removed) exists in the possible
        actions of the selected VM/host/...
"""
toolbar = self.builder.get_object("toolbar")
# for each children of toolbar
for child in toolbar.get_children():
if gtk.Buildable.get_name(child)[0:3] == "tb_":
# self.selected_actions contains possible actions
# if not exists: disable button
# else: enable button
if not self.selected_actions or \
self.selected_actions.count(gtk.Buildable.get_name(child)[3:]) \
== 0:
child.set_sensitive(False)
else:
child.set_sensitive(True)
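                # Swap clean/hard buttons: when a clean shutdown/reboot is not among the
                # allowed actions, show the hard variant instead (and vice versa).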
if gtk.Buildable.get_name(child)[3:] == "hard_shutdown":
if not self.selected_actions.count("clean_shutdown"):
self.builder.get_object("tb_clean_shutdown").hide()
self.builder.get_object("tb_hard_shutdown").show()
if gtk.Buildable.get_name(child)[3:] == "hard_reboot":
if not self.selected_actions.count("clean_reboot"):
self.builder.get_object("tb_clean_reboot").hide()
self.builder.get_object("tb_hard_reboot").show()
if gtk.Buildable.get_name(child)[3:] == "clean_shutdown":
self.builder.get_object("tb_clean_shutdown").show()
self.builder.get_object("tb_clean_reboot").show()
self.builder.get_object("tb_hard_reboot").hide()
self.builder.get_object("tb_hard_shutdown").hide()
# MENUBAR Actions
def on_m_start_activate(self, widget, data=None):
"""
"Start" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].start_vm(self.selected_ref)
def on_m_clean_shutdown_activate(self, widget, data=None):
"""
"Clean shutdown" menuitem pressed on right click menu
"""
if self.selected_type == "vm":
self.xc_servers[self.selected_host].clean_shutdown_vm(self.selected_ref)
elif self.selected_type == "server" or self.selected_type == "host":
self.on_menuitem_server_shutdown_activate(widget, data)
def on_m_clean_reboot_activate(self, widget, data=None):
"""
"Clean reboot" menuitem pressed on right click menu
"""
if self.selected_type == "vm":
self.xc_servers[self.selected_host].clean_reboot_vm(self.selected_ref)
elif self.selected_type == "server" or self.selected_type == "host":
self.on_menuitem_server_reboot_activate(widget, data)
def on_m_suspend_activate(self, widget, data=None):
"""
"Suspend" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].suspend_vm(self.selected_ref)
def on_m_unpause_activate(self, widget, data=None):
"""
"Unpause" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].unpause_vm(self.selected_ref)
def on_m_hard_reboot_activate(self, widget, data=None):
"""
"Hard reboot" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].hard_reboot_vm(self.selected_ref)
def on_m_hard_shutdown_activate(self, widget, data=None):
"""
"Hard shutdown" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].hard_shutdown_vm(self.selected_ref)
def on_m_pause_activate(self, widget, data=None):
"""
"Pause" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].pause_vm(self.selected_ref)
def on_m_unsuspend_activate(self, widget, data=None):
"""
"Resume" (unsuspend) menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].unsuspend_vm(self.selected_ref)
def on_m_resume_activate(self, widget, data=None):
"""
"Resume" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].resume_vm(self.selected_ref)
def on_menuitem_tools_updatemanager_activate(self, widget, data=None):
"""
"Update Manager" menuitem pressed on right click menu
"""
listupdates = self.builder.get_object("listupdates")
treeupdates = self.builder.get_object("treeupdates")
self.xc_servers[self.selected_host].fill_list_updates(self.selected_ref, listupdates)
        if len(listupdates):
treeupdates.set_cursor((0, ), treeupdates.get_column(0))
treeupdates.get_selection().select_path((0, ))
self.builder.get_object("updatemanager").show()
def on_installxenservertools_activate(self, widget, data=None):
"""
"Install XenServer Tools" menuitem pressed on right click menu
"""
self.xc_servers[self.selected_host].install_xenserver_tools(self.selected_ref)
def on_m_forget_activate(self, widget, data=None): # FIXME: Duplicate definition
"""
"Forget Storage" menuitem pressed on right click menu
"""
        self.xc_servers[self.selected_host].forget_storage(self.selected_ref)
def on_m_unplug_activate(self, widget, data=None):
"""
"Detach Storage" menuitem pressed on right click menu
"""
# Show confirmation dialog
self.builder.get_object("detachstorage").show()
    def on_acceptdetachstorage_clicked(self, widget, data=None):
"""
Function called when you accept confirmation "detach storage" dialog
"""
#target=self.xc_servers[self.selected_host].detach_storage(self.selected_ref)
Thread(target=self.xc_servers[self.selected_host].detach_storage, args=(self.selected_ref,)).start()
self.builder.get_object("detachstorage").hide()
def on_canceldetachstorage_clicked(self, widget, data=None):
"""
Function called when you cancel confirmation "detach storage" dialog
"""
self.builder.get_object("detachstorage").hide()
def on_m_reattach_activate(self, widget, data=None):
"""
"Reattach Storage" menuitem pressed on right click menu
"""
stgtype = self.xc_servers[self.selected_host].all['SR'][self.selected_ref]['type']
        # If the selected type is iso, you can only select "NFS ISO" or "CIFS ISO"
if stgtype == "iso":
disable = ["radionewstgnfsvhd", "radionewstgiscsi", "radionewstghwhba",
"radionewstgnetapp", "radionewstgdell"]
            for name in disable:
                self.builder.get_object(name).set_sensitive(False)
            enable = ["radionewstgcifs", "radionewstgnfsiso"]
            for name in enable:
                self.builder.get_object(name).set_sensitive(True)
elif stgtype == "lvmoiscsi":
self.builder.get_object("radionewstgiscsi").set_active(True)
self.builder.get_object("txtiscsiname").set_text(self.selected_name)
self.on_nextnewstorage_clicked(self.builder.get_object("nextnewstorage"), data)
self.builder.get_object("previousnewstorage").set_sensitive(False)
elif stgtype == "nfs":
self.builder.get_object("radionewstgnfsvhd").set_active(True)
self.builder.get_object("txtnewstgnfsname").set_text(self.selected_name)
self.on_nextnewstorage_clicked(widget, data)
self.builder.get_object("previousnewstorage").set_sensitive(False)
else:
print stgtype
self.builder.get_object("radionewstgcifs").set_active(True)
# Flag variable to know if we will do a reattach
self.reattach_storage = True
self.builder.get_object("newstorage").show()
def on_m_importvm_activate(self, widget, data=None):
"""
"Import VM" menuitem pressed on right click menu
"""
blue = gtk.gdk.color_parse("#d5e5f7")
# Disable "next button", it will be enabled when file is selected
self.builder.get_object("nextvmimport").set_sensitive(False)
self.builder.get_object("eventimport0").modify_bg(gtk.STATE_NORMAL, blue)
# Set a filter, you only can selected *.xva files
self.builder.get_object("filefilterimportvm").add_pattern("*.xva")
# Show the import window
self.builder.get_object("vmimport").show()
# listimportservers contains the connected servers
listimportservers = self.builder.get_object("listimportservers")
listimportservers.clear()
# For each host in config..
for host in self.config_hosts:
# If we are connected to this server
if host in self.xc_servers:
# Then add to list
listimportservers.append([gtk.gdk.pixbuf_new_from_file(path.join(utils.module_path(),
"images/tree_connected_16.png")),
self.xc_servers[host].hostname, True, host])
"""
else:
listimportservers.append([gtk.gdk.pixbuf_new_from_file(path.join(utils.module_path(),
"images/tree_disconnected_16.png")),
host,False]);
"""
# If we are connected to some server..
        if len(listimportservers):
treeimportservers = self.builder.get_object("treeimportservers")
# Then selected the first
treeimportservers.set_cursor((0, ), treeimportservers.get_column(0))
treeimportservers.get_selection().select_path((0, ))
def on_m_export_activate(self, widget, data=None):
"""
"Export VM" menuitem pressed on right click menu
"""
# Set default name
self.filesave.set_current_name(self.selected_name + ".xva")
# Show the choose dialog
self.filesave.show()
def on_m_snap_newvm_activate(self, widget, data=None):
"""
"New VM From snapshot" menuitem pressed on "snapshot" menu (Snapshots tab of VM)
"""
# Show the "new vm" window
# TODO -> select vm with name_label
self.on_m_newvm_activate(widget, data)
def on_m_snap_createtpl_activate(self, widget, data=None):
"""
"Create template from snapshot" menuitem pressed on "snapshot" menu (Snapshots tab of VM)
"""
# set a default name
self.builder.get_object("snaptplname").set_text(
"Template from snapshot '" +
self.xc_servers[self.selected_host].all['vms'][self.selected_snap_ref]['name_label'] + "'")
# Shows a dialog to enter a name for new template
self.builder.get_object("dialogsnaptplname").show()
def on_m_snap_delete_activate(self, widget, data=None):
"""
"Delete snapshot" menuitem pressed on "snapshot" menu (Snapshots tab of VM)
"""
# Show a dialog asking confirmation
self.builder.get_object("dialogsnapshotdelete").show()
def on_m_destroy_activate(self, widget, data=None):
"""
"Destroy" menuitem pressed on right click menu (VM)
"""
# Show a dialog asking confirmation
if self.selected_type == "vm":
self.builder.get_object("dialogdeletevm").show()
self.builder.get_object("dialogdeletevm").set_markup("Are you sure you want to delete VM '" +
self.selected_name + "' ?")
elif self.selected_type == "template" or self.selected_type == "custom_template":
self.builder.get_object("dialogdeletevm").show()
self.builder.get_object("dialogdeletevm").set_markup("Are you sure you want to delete template '" +
self.selected_name + "' ?")
elif self.selected_type == "storage":
print "delete storage"
#self.treestore.remove(self.selected_iter)
#self.xc_servers[self.selected_host].destroy_vm(self.selected_ref)
def on_m_connect_activate(self, widget, data=None):
"""
"Connect" menuitem pressed on right click menu (Host)
"""
        # Check whether a "master password" exists
        # A master password is needed to store recoverable passwords with XTEA
        # XTEA is a block cipher used to store server passwords in oxc.conf
        # If a master password is in use (saved in oxc.conf as an md5 hash), use
        # it for the XTEA decryption
if self.selected_name not in self.config_hosts:
return
# if len(self.config_hosts[self.selected_name]) > 2:
# self.builder.get_object("checksslconnection").set_active(
# str(self.config_hosts[self.selected_name][2]) == "True")
if len(self.config_hosts[self.selected_name]) > 2:
use_ssl = self.config_hosts[self.selected_name][2]
else:
use_ssl = None
if len(self.config_hosts[self.selected_name]) > 3:
verify_ssl = self.config_hosts[self.selected_name][3]
else:
verify_ssl = None
if self.password and self.config_hosts[self.selected_name][1]:
            # Decrypt the stored password back to plain text
            # Use the typed master password (previously checked against its md5 hash)
            # Pad it on the left with "X" to reach 16 characters
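            # Illustrative example (assuming master password "abcd"):
            #   key = "XXXXXXXXXXXXabcd"   # padded to 16 characters
            #   plain = xtea.crypt(key, stored_hex_password.decode("hex"), self.iv)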
decrypt_pw = xtea.crypt("X" * (16-len(self.password)) + self.password,
self.config_hosts[self.selected_name][1].decode("hex"), self.iv)
# Call to add server with name, ip and decrypted password
# Add server try to connect to the server
add_server = AddServer(self,
self.selected_name,
self.config_hosts[self.selected_name][0],
decrypt_pw, use_ssl=use_ssl,
verify_ssl=verify_ssl)
add_server.connect_server()
else:
# If master password is not set or server hasn't a saved password
# Empty entries
#self.builder.get_object("addserverhostname").get_child().set_text(self.selected_name)
#self.builder.get_object("addserverusername").set_text(self.config_hosts[self.selected_name][0])
#self.builder.get_object("addserverpassword").set_text("")
# Show the add server window
#self.builder.get_object("addserver").show_all()
#self.builder.get_object("addserverpassword").grab_focus()
add_server = AddServer(self, self.selected_name,
self.config_hosts[self.selected_name][0])
add_server.show_dialog("addserverpassword")
def on_m_disconnect_activate(self, widget, data=None):
"""
"Disconnect" menuitem pressed on right click menu (Host)
"""
# get the ip/host (not virtual name)
host = self.xc_servers[self.selected_host].host
        # Logout implies:
        # - Unregistering events from the current session
        # - Disconnecting from the server
self.xc_servers[self.selected_host].logout()
# Remove from list (and children)
if len(self.treestore.get_path(self.selected_iter)) == 2:
self.treestore.remove(self.selected_iter)
else:
vm_path = (self.treestore.get_path(self.selected_iter)[0], self.treestore.get_path(self.selected_iter)[1])
iter = self.treestore.get_iter(vm_path)
self.treestore.remove(iter)
# Add again the ip/host name
self.treestore.append(self.treeroot, ([gtk.gdk.pixbuf_new_from_file(
path.join(utils.module_path(), "images/tree_disconnected_16.png")), host, None, "server", "Disconnected",
None, None, ["connect", "forgetpw", "remove"], None]))
        # If the copy window is shown, hide it
self.builder.get_object("windowcopyvm").hide()
self.treeview.set_cursor((0, ), self.treeview.get_column(0))
self.treeview.get_selection().select_path((0, ))
# Update tabs
self.selected_type = "home"
self.update_tabs()
# Delete alerts
self.builder.get_object("listalerts").clear()
for host in self.xc_servers:
if self.xc_servers[host].is_connected:
self.xc_servers[host].fill_alerts(self.listalerts)
self.update_n_alerts()
def on_m_newvm_activate(self, widget, data=None):
"""
"New VM" menuitem pressed on right click menu (Host)
"""
# self.newvmdata is used to set "new vm" parameters
self.newvmdata = {}
listtemplates = self.builder.get_object("listtemplates")
# Fill the "list of templates" to create a new VM
self.xc_servers[self.selected_host].fill_list_templates(listtemplates)
        # Set to the first page; the next button is enabled once the page is complete ("page_complete")
self.builder.get_object("tabboxnewvm").set_current_page(0)
# Select the first template by default
treetemplates = self.builder.get_object("treetemplates")
treetemplates.set_cursor((0, 1), treetemplates.get_column(1))
treetemplates.get_selection().select_path((0, 1))
        # Some templates need a DVD drive or ISO image to be used
# Fill the possible iso images to use
self.xc_servers[self.selected_host].fill_list_isoimages(self.listisoimage)
self.builder.get_object("radiobutton3_data").set_active(1)
# Fill the connected DVDS
self.xc_servers[self.selected_host].fill_list_phydvd(self.listphydvd)
        # Fill the possible networks for the new vm's default interfaces
self.xc_servers[self.selected_host].fill_list_networks(self.listnetworks, self.listnetworkcolumn)
listnewvmhosts = self.builder.get_object("listnewvmhosts")
treenewvmhosts = self.builder.get_object("treenewvmhosts")
        # A new vm could be started on any host (e.g. a pool with several servers)
        # Fill the possible hosts where the vm could be started
        default_host = self.xc_servers[self.selected_host].fill_listnewvmhosts(listnewvmhosts)
        # Set the default server
        treenewvmhosts.set_cursor((default_host, 1), treenewvmhosts.get_column(0))
        treenewvmhosts.get_selection().select_path((default_host, 1))
        # Set the default options
self.newvmdata['location'] = "radiobutton1"
self.newvmdata['vdi'] = ""
self.builder.get_object("lblnewvm0").set_markup(
'<span background="blue" foreground="white"><b>%-35s</b></span>' % "Template")
labels = ["Name", "Location", "Home Server", "CPU / Memory", "Virtual disks", "Virtual Interfaces", "Finish"]
for i in range(1, 8):
self.builder.get_object("lblnewvm" + str(i)).set_markup(" <b>%-35s</b>" % labels[i-1])
# Show the "new vm" assistant
self.newvm.show()
# MENUBAR checks
def on_checksavepassword_toggled(self, widget, data=None):
self.builder.get_object("label259").set_sensitive(widget.get_active())
self.builder.get_object("txtmasterpassword").set_sensitive(widget.get_active())
def on_checkshowxtpls_toggled(self, widget, data=None):
"""
        Enable or disable showing XenServer templates in the left tree
"""
# Save enable or disable to configuration
self.config["gui"]["show_xs_templates"] = widget.get_active()
self.config.write()
# Call to "refilter" to hide/show the templates
self.modelfilter.refilter()
def on_checkshowhiddenvms_toggled(self, widget, data=None):
"""
        Enable or disable showing hidden VMs in the left tree
"""
# Save enable or disable to configuration
self.config["gui"]["show_hidden_vms"] = widget.get_active()
self.config.write()
# Call to "refilter" to hide/show the templates
self.modelfilter.refilter()
def on_checkshowtoolbar_toggled(self, widget, data=None):
"""
        Enable or disable showing the top toolbar
"""
self.config["gui"]["show_toolbar"] = widget.get_active()
        # If active, show the toolbar; otherwise hide it
if widget.get_active():
self.builder.get_object("toolbar").show()
else:
self.builder.get_object("toolbar").hide()
# Save in configuration
self.config.write()
def on_checkshowcustomtpls_toggled(self, widget, data=None, a=None):
"""
        Enable or disable showing custom templates in the left tree
"""
self.config["gui"]["show_custom_templates"] = widget.get_active()
# Save in configuration
self.config.write()
# Call to "refilter" to hide/show custom templates
self.modelfilter.refilter()
def on_checkshowlocalstorage_toggled(self, widget, data=None, a=None):
"""
        Enable or disable showing local storage in the left tree
"""
self.config["gui"]["show_local_storage"] = widget.get_active()
# Save in configuration
self.config.write()
# Call to "refilter" to hide/show custom templates
self.modelfilter.refilter()
# MENUBAR
def on_menuitem_entermaintenancemode_activate(self, widget, data=None):
"""
"Enter Maintenance Mode" on menuitem is pressed
"""
listmaintenancemode = self.builder.get_object("listmaintenancemode")
self.xc_servers[self.selected_host].fill_vms_which_prevent_evacuation(self.selected_ref, listmaintenancemode)
self.builder.get_object("maintenancemode").show()
def on_cancelmaintenancemode_clicked(self, widget, data=None):
"""
Pressed "Cancel" button on maintenance window
"""
self.builder.get_object("maintenancemode").hide()
def on_acceptmaintenancemode_clicked(self, widget, data=None):
"""
Pressed "Accept" button on maintenance window
"""
self.xc_servers[self.selected_host].enter_maintancemode(self.selected_ref)
self.builder.get_object("maintenancemode").hide()
def on_menuitem_exitmaintenancemode_activate(self, widget, data=None):
"""
"Exit Maintenance Mode" on menuitem is pressed
"""
self.xc_servers[self.selected_host].exit_maintancemode(self.selected_ref)
def on_menuitem_vm_startrecovery_activate(self, widget, data=None):
"""
"Start" button on menuitem is pressed
Power on a VM
"""
self.xc_servers[self.selected_host].start_vm_recovery_mode(self.selected_ref)
def on_menuitem_stg_new_activate(self, widget, data=None):
"""
"New Storage Repository" menuitem pressed on menubar
"""
blue = gtk.gdk.color_parse("#d5e5f7")
# Disable "next button", it will be enabled when file is selected
enable = ["radionewstgnfsvhd", "radionewstgiscsi", "radionewstghwhba",
"radionewstgnetapp", "radionewstgdell", "radionewstgcifs",
"radionewstgnfsiso"]
for widget in enable:
self.builder.get_object(widget).set_sensitive(True)
self.reattach_storage = False
self.builder.get_object("nextnewstorage").set_sensitive(True)
self.builder.get_object("eventnewstg0").modify_bg(gtk.STATE_NORMAL, blue)
self.builder.get_object("tabboxnewstorage").set_current_page(0)
self.builder.get_object("newstorage").show()
def on_menuitem_dmesg_activate(self, widget, data=None):
dmesg = self.xc_servers[self.selected_host].get_dmesg(self.selected_ref)
self.builder.get_object("txthostdmesg").get_buffer().set_text(dmesg)
self.builder.get_object("hostdmesg").show()
def on_management_activate(self, widget, data=None):
"""
"Management interfaces" on server menu is rpressed
"""
listmgmtinterfaces = self.builder.get_object("listmgmtinterfaces")
treemgmtinterfaces = self.builder.get_object("treemgmtinterfaces")
# Fill the list of interfaces with "Management" option enabled
self.xc_servers[self.selected_host].fill_mamagement_ifs_list(listmgmtinterfaces)
# Set the top label with server selected
lblmgmtinterfaces = self.builder.get_object("lblmgmtinterfaces")
lblmgmtinterfaces.set_text(lblmgmtinterfaces.get_text().replace("{0}", self.selected_name))
# Show the window
self.builder.get_object("mgmtinterface").show()
# Select the first interface by default
selection = treemgmtinterfaces.get_selection()
treemgmtinterfaces.set_cursor((0, ), treemgmtinterfaces.get_column(0))
treemgmtinterfaces.get_selection().select_path((0, ))
# Get the reference of default interface
pif_ref = listmgmtinterfaces.get_value(selection.get_selected()[1], 0)
combomgmtnetworks = self.builder.get_object("combomgmtnetworks")
listmgmtnetworks = self.builder.get_object("listmgmtnetworks")
# Get all information for this PIF
pif = self.xc_servers[self.selected_host].all['PIF'][pif_ref]
# Fill the network combo with possible networks
        # fill_management_networks returns the position of the pif's network reference
current = self.xc_servers[self.selected_host].fill_management_networks(listmgmtnetworks, pif['network'])
# Set in combo the network for default PIF
combomgmtnetworks.set_active(current)
# If interface configuration is dhcp disable ip/mask/gw entries
if pif['ip_configuration_mode'] == "DHCP":
self.builder.get_object("txtmgmtip").set_sensitive(False)
self.builder.get_object("txtmgmtmask").set_sensitive(False)
self.builder.get_object("txtmgmtgw").set_sensitive(False)
        # Even if they may be disabled, set the ip/netmask/gateway
self.builder.get_object("txtmgmtip").set_text(pif['IP'])
self.builder.get_object("txtmgmtmask").set_text(pif['netmask'])
self.builder.get_object("txtmgmtgw").set_text(pif['gateway'])
# If ip configuration is with dhcp set appropriate radio enabled
self.builder.get_object("radiomgmtipdhcp").set_active(pif['ip_configuration_mode'] == "DHCP")
self.builder.get_object("radiomgmtipmanual").set_active(pif['ip_configuration_mode'] != "DHCP")
# If dns configuration is with dhcp set appropriate radio enabled
self.builder.get_object("radiomgmtdnsdhcp").set_active(pif['DNS'] == "")
self.builder.get_object("radiomgmtdnsmanual").set_active(pif['DNS'] != "")
# If dns is manual..
if pif['DNS']:
# Fill the entries with dns ips
dns = pif['DNS'].split(",")
self.builder.get_object("txtmgmtdns1").set_text(dns[0])
if len(dns) > 1:
self.builder.get_object("txtmgmtdns2").set_text(dns[1])
else:
self.builder.get_object("txtmgmtdns2").set_text("")
else:
            # If not, clear both entries and disable them
self.builder.get_object("txtmgmtdns1").set_sensitive(False)
self.builder.get_object("txtmgmtdns2").set_sensitive(False)
self.builder.get_object("txtmgmtdns1").set_text("")
self.builder.get_object("txtmgmtdns2").set_text("")
def on_menuitem_stg_default_activate(self, widget, data=None):
"""
"Set as Default Storage Repository" menu item is pressed (storage menu)
"""
self.xc_servers[self.selected_host].set_default_storage(self.selected_ref)
def on_menuitem_tools_statusreport_activate(self, widget, data=None):
"""
"Status report" menu item is pressed (tools menu)
"""
self.builder.get_object("statusreport").show()
listreport = self.builder.get_object("listreport")
self.xc_servers[self.selected_host].fill_list_report(self.selected_ref, listreport)
self.update_report_total_size_time()
def on_menuitem_tools_cad_activate(self, widget, data=None):
"""
"Send Ctrl-Alt-Del" menu item is pressed (tools menu)
"""
self.tunnel[data].send_data("\xfe\x01\x00\x00\x00\x00\x00\x1d")
self.tunnel[data].send_data("\xfe\x01\x00\x00\x00\x00\x00\x38")
self.tunnel[data].send_data("\xfe\x01\x00\x00\x00\x00\x00\xd3")
self.tunnel[data].send_data("\xfe\x00\x00\x00\x00\x00\x00\x1d")
self.tunnel[data].send_data("\xfe\x00\x00\x00\x00\x00\x00\x38")
self.tunnel[data].send_data("\xfe\x00\x00\x00\x00\x00\x00\xd3")
def on_menuitem_migratetool_activate(self, widget, data=None):
"""
"Migrate tool" menu item is pressed (tools menu)
"""
self.builder.get_object("spinmigratemem").set_value(256)
self.builder.get_object("spinmigratevcpus").set_value(1)
self.builder.get_object("checkmigrateoutputserver").set_sensitive(self.selected_type == "host")
self.builder.get_object("migratetool").show()
def on_menuitem_takescreenshot_activate(self, widget, data=None):
"""
"Take screenshot" menu item is pressed (tools menu)
"""
self.builder.get_object("savescreenshot").set_current_name("Screenshot_%s.jpg"
% self.selected_name.replace('/', '_'))
self.builder.get_object("savescreenshot").show()
def on_cancelsavescreenshot_clicked(self, widget, data=None):
self.builder.get_object("savescreenshot").hide()
def on_acceptsavescreenshot_clicked(self, widget, data=None):
filename = self.builder.get_object("savescreenshot").get_filename()
if self.selected_type == "vm":
self.xc_servers[self.selected_host].save_screenshot(self.selected_ref, filename)
else:
#host
ref = self.xc_servers[self.selected_host].host_vm[self.selected_ref][0]
self.xc_servers[self.selected_host].save_screenshot(ref, filename)
self.builder.get_object("savescreenshot").hide()
def on_menuitem_options_activate(self, widget, data=None):
"""
"Options" menu item is pressed (tools menu)
"""
# Enable/disable the save password option
self.builder.get_object("checksavepassword").set_active(eval(self.config["gui"]["save_password"]))
# Show the options dialog
self.builder.get_object("dialogoptions").show()
def on_menuitem_delete_activate(self, widget, data=None):
"""
"Delete" menu item is pressed (only for Pool)
"""
# Delete the pool
self.xc_servers[self.selected_host].delete_pool(self.selected_ref)
def on_menuitem_connectall_activate(self, widget, data=None):
"""
"Connect all" menu item is pressed (server menu)
"""
# For each server: connect
# TODO: fix
self.treestore.foreach(self.foreach_connect, True)
def on_menuitem_disconnectall_activate(self, widget, data=None):
"""
"Disconnect all" menu item is pressed (server menu)
"""
# For each server: disconnect
# TODO: fix
self.treestore.foreach(self.foreach_connect, False)
def on_collapsechildren_activate(self, widget, data=None):
"""
"Collapse Children" menu item is pressed
"""
for child in range(0, self.treestore.iter_n_children(self.selected_iter)):
iter = self.treestore.iter_nth_child(self.selected_iter, child)
if self.treestore.iter_n_children(iter):
path = self.treestore.get_path(iter)
self.treeview.collapse_row(path)
def on_expandall_activate(self, widget, data=None):
"""
"Expand all" menu item is pressed
"""
for child in range(0, self.treestore.iter_n_children(self.selected_iter)):
iter = self.treestore.iter_nth_child(self.selected_iter, child)
if self.treestore.iter_n_children(iter):
path = self.treestore.get_path(iter)
self.treeview.expand_row(path, True)
def on_menuitem_changepw_activate(self, widget, data=None):
"""
"Change Server Password" menu item is pressed
"""
self.builder.get_object("lblwrongpw").hide()
self.builder.get_object("changepassword").show()
self.builder.get_object("txtcurrentpw").set_text("")
self.builder.get_object("txtnewpw").set_text("")
self.builder.get_object("txtrenewpw").set_text("")
self.builder.get_object("acceptchangepassword").set_sensitive(False)
label = self.builder.get_object("lblchangepw").get_label()
self.builder.get_object("lblchangepw").set_label(label.replace("{0}", self.selected_name))
def on_menuitem_install_xslic_activate(self, widget, data=None):
"""
"Install License Key" menu item is pressed
"""
# Show file chooser
if self.xc_servers[self.selected_host].all['host'][self.selected_ref].get("license_server"):
licence_server = self.xc_servers[self.selected_host].all['host'][self.selected_ref].get("license_server")
self.builder.get_object("licensehost").set_text(licence_server["address"])
self.builder.get_object("licenseport").set_text(licence_server["port"])
self.builder.get_object("dialoglicensehost").show()
else:
self.builder.get_object("filterfilelicensekey").add_pattern("*.xslic")
self.builder.get_object("filelicensekey").show()
def on_cancellicensehost_clicked(self, widget, data=None):
"""
Function called when you press cancel on license host dialog
"""
self.builder.get_object("dialoglicensehost").hide()
def on_acceptlicensehost_clicked(self, widget, data=None):
"""
        Function called when you press accept on license host dialog
"""
edition = "advanced"
for licwidget in ["advanced", "enterprise", "platinum", "enterprise-xd"]:
if self.builder.get_object(licwidget).get_active():
edition = licwidget
break
licensehost = self.builder.get_object("licensehost").get_text()
licenseport = self.builder.get_object("licenseport").get_text()
self.xc_servers[self.selected_host].set_license_host(self.selected_ref, licensehost, licenseport, edition)
self.builder.get_object("dialoglicensehost").hide()
def on_cancelfilelicensekey_clicked(self, widget, data=None):
"""
        Function called when you press cancel on the filechooser "install license key"
"""
# Hide the file chooser
self.builder.get_object("filelicensekey").hide()
def on_openfilelicensekey_clicked(self, widget, data=None):
"""
        Function called when you press open on the filechooser "install license key"
"""
filename = self.builder.get_object("filelicensekey").get_filename()
self.xc_servers[self.selected_host].install_license_key(self.selected_ref, filename)
# Hide the file chooser
self.builder.get_object("filelicensekey").hide()
def on_menuitem_restoreserver_activate(self, widget, data=None):
"""
"Restoreserver" menu item is pressed
"""
# Show select destination dialog
self.builder.get_object("filefilterrestoreserver").add_pattern("*.xbk")
self.builder.get_object("filerestoreserver").show()
def on_menuitem_backupserver_activate(self, widget, data=None):
"""
"Backup server" menu item is pressed
"""
# Show select destination dialog
filebackupserver = self.builder.get_object("filebackupserver")
filebackupserver.set_current_name(self.selected_name + ".xbk")
self.builder.get_object("filefilterbackupserver").add_pattern("*.xbk")
self.builder.get_object("filebackupserver").show()
def on_menuitem_downloadlogs_activate(self, widget, data=None):
"""
"Download logs" (host) menu item is pressed
"""
# Show select destination dialog
filedownloadlogs = self.builder.get_object("filedownloadlogs")
filedownloadlogs.set_current_name(self.selected_name + ".tar.gz")
self.builder.get_object("filedownloadlogs").show()
def on_cancelfilebackupserver_clicked(self, widget, data=None):
"""
Function called when you cancel dialog for save backup server
"""
self.builder.get_object("filebackupserver").hide()
def on_savefilebackupserver_clicked(self, widget, data=None):
"""
Function called when you accept dialog for save backup server
"""
filebackupserver = self.builder.get_object("filebackupserver")
filename = filebackupserver.get_filename()
self.xc_servers[self.selected_host].thread_backup_server(self.selected_ref, filename, self.selected_name)
self.builder.get_object("filebackupserver").hide()
def on_cancelfiledownloadlogs_clicked(self, widget, data=None):
"""
Function called when you cancel dialog for download logs
"""
self.builder.get_object("filedownloadlogs").hide()
def on_savefiledownloadlogs_clicked(self, widget, data=None):
"""
Function called when you accept dialog for download logs
"""
filedownloadlogs = self.builder.get_object("filedownloadlogs")
filename = filedownloadlogs.get_filename()
self.xc_servers[self.selected_host].thread_host_download_logs(self.selected_ref, filename, self.selected_name)
self.builder.get_object("filedownloadlogs").hide()
def on_cancelrestoreserver_clicked(self, widget, data=None):
"""
Function called when you cancel dialog for open file to restore server
"""
self.builder.get_object("filerestoreserver").hide()
def on_openfilerestoreserver_clicked(self, widget, data=None):
"""
Function called when you accept dialog for open file to restore server
"""
filename = self.builder.get_object("filerestoreserver").get_filename()
self.xc_servers[self.selected_host].thread_restore_server(self.selected_ref, filename, self.selected_name)
self.builder.get_object("filerestoreserver").hide()
def on_menuitem_server_reboot_activate(self, widget, data=None):
"""
"Reboot server" menu item is pressed
"""
self.builder.get_object("confirmreboot").show()
def on_cancelconfirmreboot_clicked(self, widget, data=None):
"""
Function called when you cancel dialog for reboot server
"""
self.builder.get_object("confirmreboot").hide()
def on_acceptconfirmreboot_clicked(self, widget, data=None):
"""
        Function called when you accept dialog for reboot server
"""
res = self.xc_servers[self.selected_host].reboot_server(self.selected_ref)
#res = "OK"
if res == "OK":
self.on_m_disconnect_activate(widget, data)
self.builder.get_object("confirmreboot").hide()
def on_menuitem_server_shutdown_activate(self, widget, data=None):
"""
"Reboot server" menu item is pressed
"""
self.builder.get_object("confirmshutdown").show()
def on_acceptconfirmshutdown_clicked(self, widget, data=None):
"""
"Reboot server" menu item is pressed
"""
res = self.xc_servers[self.selected_host].shutdown_server(self.selected_ref)
if res == "OK":
self.on_m_disconnect_activate(widget, data)
self.builder.get_object("confirmshutdown").hide()
def on_cancelconfirmshutdown_clicked(self, widget, data=None):
"""
Function called when you cancel dialog for shutdown server
"""
self.builder.get_object("confirmshutdown").hide()
def on_menuitem_checkforupdates_activate(self, widget, data=None):
"""
"Check for Updates" menu item is pressed (help)
"""
pool = []
hotfix = []
# Get pool and patch info
for server in self.xc_servers.values():
for host in server.all['host']:
pool.append("pool_" + server.all['host'][host]["software_version"]["product_version"] + "=1")
for patch in server.all['host'][host]["patches"]:
host_patch = server.all['host_patch'][patch]
if host_patch["applied"]:
hotfix.append("hotfix_" + server.all['pool_patch'][host_patch["pool_patch"]]["uuid"] + "=1")
else:
hotfix.append("hotfix_" + server.all['pool_patch'][host_patch["pool_patch"]]["uuid"] + "=0")
# TODO: Get the server version from the server, to generate correct URL
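        # Illustrative result (values made up): the generated URL ends up looking like
        #   http://updates.xensource.com/XenServer/5.5.2/XenCenter?pool_5.5.0=1;hotfix_<uuid>=1;...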
url = "http://updates.xensource.com/XenServer/5.5.2/XenCenter?%s;%s" % (";".join(pool), ";".join(hotfix))
import webbrowser
webbrowser.open(url)
def on_menuitem_xenserver_on_the_web_activate(self, widget, data=None):
"""
"Xenserver on the web" menu item is pressed (help)
"""
url = "http://www.xenserver.com"
import webbrowser
webbrowser.open(url)
def on_menuitem_help_activate(self, widget, data=None):
"""
"About" menu item is pressed (Help)
"""
# Show about dialog
about = self.builder.get_object("aboutdialog")
about.set_version(__version__)
about.show()
def on_menuitem_pool_remove_server_activate(self, widget, data=None):
"""
"Remove server" (from pool) menu item is pressed (pool)
"""
self.last_dialog_label = self.builder.get_object("removeserverfrompool").get_property("text")
label = self.builder.get_object("removeserverfrompool").get_property("text")
pool_ref = self.xc_servers[self.selected_host].all['pool'].keys()[0]
self.builder.get_object("removeserverfrompool").set_markup(
label.replace("{0}", self.selected_name).replace(
"{1}", self.xc_servers[self.selected_host].all['pool'][pool_ref]["name_label"]))
self.builder.get_object("removeserverfrompool").show()
def on_acceptremoveserverfrompool_clicked(self, widget, data=None):
"""
Function called when you accept remove server from pool
"""
Thread(target=self.xc_servers[self.selected_host].remove_server_from_pool,
args=(self.selected_ref,)).start()
self.builder.get_object("removeserverfrompool").hide()
self.builder.get_object("removeserverfrompool").set_markup(self.last_dialog_label)
def on_cancelremoveserverfrompool_clicked(self, widget, data=None):
"""
        Function called when you cancel remove server from pool
"""
self.builder.get_object("removeserverfrompool").hide()
self.builder.get_object("removeserverfrompool").set_markup(self.last_dialog_label)
def on_menuitem_pool_backupdb_activate(self, widget, data=None):
"""
"Backup database" menu item is pressed(pool)
"""
self.builder.get_object("filterfilepoolbackupdb").add_pattern("*.xml")
filepoolbackupdb = self.builder.get_object("filepoolbackupdb")
filepoolbackupdb.set_current_name(self.selected_name + "_backup_db.xml")
filepoolbackupdb.show()
def on_cancelfilepoolbackupdb_clicked(self, widget, data=None):
"""
"Cancel" press on file chooser dialog for database pool backup
"""
self.builder.get_object("filepoolbackupdb").hide()
def on_acceptfilepoolbackupdb_clicked(self, widget, data=None):
"""
"Cancel" press on file chooser dialog for database pool backup
"""
filename = self.builder.get_object("filepoolbackupdb").get_filename()
self.xc_servers[self.selected_host].pool_backup_database(self.selected_ref, filename, self.selected_name)
self.builder.get_object("filepoolbackupdb").hide()
def on_rebootconfirmpoolrestoredb_clicked(self, widget, data=None):
"""
"Reboot" press on dialog restore database pool (reboot/dry run/cancel)
"""
self.builder.get_object("confirmpoolrestoredb").hide()
filename = self.builder.get_object("filepoolrestoredb").get_filename()
Thread(target=self.xc_servers[self.selected_host].pool_restore_database,
args=(self.selected_ref, filename, self.selected_name, "false")).start()
self.builder.get_object("filepoolrestoredb").hide()
def on_dryrunconfirmpoolrestoredb_clicked(self, widget, data=None):
"""
"Dry run" press on dialog restore database pool (reboot/dry run/cancel)
"""
self.builder.get_object("confirmpoolrestoredb").hide()
filename = self.builder.get_object("filepoolrestoredb").get_filename()
Thread(target=self.xc_servers[self.selected_host].pool_restore_database,
args=(self.selected_ref, filename, self.selected_name, "true")).start()
self.builder.get_object("filepoolrestoredb").hide()
def on_cancelconfirmpoolrestoredb_clicked(self, widget, data=None):
"""
"Dry run" press on dialog restore database pool (reboot/dry run/cancel)
"""
self.builder.get_object("confirmpoolrestoredb").hide()
self.builder.get_object("filepoolbackupdb").hide()
def on_menuitem_pool_restoredb_activate(self, widget, data=None):
"""
"Restore database" menu item is pressed(pool)
"""
self.builder.get_object("filepoolrestoredb").show()
def on_cancelfilepoolrestoredb_clicked(self, widget, data=None):
"""
"Cancel" press on file chooser dialog for database pool restore
"""
self.builder.get_object("filepoolrestoredb").hide()
def on_acceptfilepoolrestoredb_clicked(self, widget, data=None):
"""
"Open" press on file chooser dialog for database pool restore
"""
self.builder.get_object("confirmpoolrestoredb").show()
self.builder.get_object("filepoolrestoredb").hide()
def on_menuitem_pool_disconnect_activate(self, widget, data=None):
"""
"Disconnect" (from pool) menu item is pressed
"""
self.on_m_disconnect_activate(widget, data)
def on_menuitem_pool_new_activate(self, widget, data=None):
"""
"New Pool..." menu item is pressed
"""
listpoolmaster = self.builder.get_object("listpoolmaster")
listpoolmaster.clear()
combopoolmaster = self.builder.get_object("combopoolmaster")
# For each server add to combobox master servers list
for host in self.config_hosts.keys():
# If server is connected..
if host in self.xc_servers:
# Add to combo
pool = False
for pool_ref in self.xc_servers[host].all['pool']:
if self.xc_servers[host].all['pool'][pool_ref]['name_label'] != "":
pool = True
if not pool:
listpoolmaster.append([host, self.xc_servers[host].hostname])
# Set the first as default
combopoolmaster.set_active(0)
ref = None
# If there are servers added to combobox, get the ref
if combopoolmaster.get_active_iter():
ref = listpoolmaster.get_value(combopoolmaster.get_active_iter(), 0)
listpoolvms = self.builder.get_object("listpoolvms")
listpoolvms.clear()
# For each server add to possible servers for pool
for host in self.config_hosts.keys():
if host not in self.xc_servers:
listpoolvms.append([None, host, 0, "Disconnected", False])
else:
if self.xc_servers[host].is_connected:
pool = False
for pool_ref in self.xc_servers[host].all['pool']:
if self.xc_servers[host].all['pool'][pool_ref]['name_label'] != "":
pool = True
if not pool:
if ref != host:
listpoolvms.append([host, self.xc_servers[host].hostname, False, "", True])
else:
listpoolvms.append([host, self.xc_servers[host].hostname, True, "Master", False])
else:
listpoolvms.append([host, self.xc_servers[host].hostname, False,
"This server is already in a pool", False])
else:
listpoolvms.append([None, host, 0, "Disconnected", False])
# Show the "newpool" window
self.builder.get_object("newpool").show()
def update_menubar(self):
"""
        This function is called when a VM, host, storage or template is selected
        Depending on whether a pool, server, host (connected server), vm, storage or
        template is selected, some menu items are enabled and the others are disabled
"""
show = {}
if self.selected_type == "pool":
show["menu5"] = ["menuitem_pool_new", "menuitem_pool_delete", "menuitem_pool_disconnect",
"menuitem_pool_prop", "menuitem_pool_backupdb", "menuitem_pool_restoredb",
"menuitem_pool_add_server"]
# TODO: disable menuitem_connectall
show["menu6"] = ["menuitem_addserver", "menuitem_disconnectall", "menuitem_connectall", "menuitem_forget",
"menuitem_remove"]
show["menu7"] = ["menuitem_importvm2"]
show["menu8"] = [""]
show["menu9"] = [""]
show["menu10"] = ["menuitem_options", "menuitem_migratetool", "menuitem_tools_updatemanager"]
if self.selected_type == "home":
show["menu5"] = [""]
# TODO: disable menuitem_connectall
show["menu6"] = ["menuitem_addserver", "menuitem_connectall", "menuitem_disconnectall"]
show["menu7"] = ["menuitem_importvm2"]
show["menu8"] = [""]
show["menu9"] = [""]
show["menu10"] = ["menuitem_options", "menuitem_tools_alerts", "menuitem_migratetool"]
if self.selected_type == "server":
if self.selected_state == "Disconnected":
show["menu5"] = ["menuitem_pool_new"]
# TODO: disable menuitem_connectall
show["menu6"] = ["menuitem_addserver", "menuitem_disconnectall", "menuitem_connectall",
"menuitem_connect", "menuitem_forget", "menuitem_remove"]
show["menu7"] = ["menuitem_importvm2"]
show["menu8"] = [""]
show["menu9"] = [""]
show["menu10"] = ["menuitem_options", "menuitem_migratetool"]
if self.selected_type == "host":
show["menu5"] = ["menuitem_pool_new"]
# TODO: use allowed_operations reboot/shutdown
show["menu6"] = ["menuitem_addserver", "menuitem_disconnectall", "menuitem_disconnect",
"menuitem_forget", "menuitem_remove", "menuitem_newvm", "menuitem_server_prop",
"menuitem_mgmt_ifs", "menuitem_dmesg", "menuitem_server_reboot",
"menuitem_server_shutdown", "menuitem_changepw", "menuitem_backupserver",
"menuitem_restoreserver", "menuitem_install_xslic", "menuitem_server_add_to_pool",
"menuitem_downloadlogs"]
show["menu7"] = ["menuitem_importvm2", "menuitem_newvm2"]
show["menu8"] = ["menuitem_stg_new"]
show["menu9"] = ["menuitem_tpl_import"]
show["menu10"] = ["menuitem_options", "menuitem_tools_alerts", "menuitem_takescreenshot",
"menuitem_migratetool", "menuitem_tools_statusreport", "menuitem_tools_updatemanager"]
pool_ref = self.xc_servers[self.selected_host].all['pool'].keys()[0]
if self.xc_servers[self.selected_host].all['host'][self.selected_ref]["enabled"]:
show["menu6"].append("menuitem_entermaintenancemode")
else:
show["menu6"].append("menuitem_exitmaintenancemode")
if self.xc_servers[self.selected_host].all['pool'][pool_ref]["name_label"] != '' and \
self.xc_servers[self.selected_host].all['pool'][pool_ref]["master"] != self.selected_ref:
show["menu5"].append("menuitem_pool_remove_server")
if self.selected_type == "vm":
show["menu6"] = ["menuitem_newvm", "menuitem_server_prop", "menuitem_mgmt_ifs", "menuitem_addserver",
"menuitem_disconnectall"]
show["menu7"] = ["menuitem_importvm2", "menuitem_newvm2", "menuitem_vm_prop"]
show["menu8"] = ["menuitem_stg_new", "menuitem_stg_newvdi", "menuitem_stg_attachvdi"]
show["menu9"] = ["menuitem_tpl_import"]
show["menu10"] = ["menuitem_options", "menuitem_tools_alerts", "menuitem_takescreenshot",
"menuitem_migratetool"]
            # Special case: if "start" is among the allowed operations of the
            # selected VM, also add the "start in recovery mode" menu item
for op in self.xc_servers[self.selected_host].all['vms'][self.selected_ref]['allowed_operations']:
show["menu7"].append("menuitem_vm_" + op)
if op == "start":
show["menu7"].append("menuitem_vm_startrecovery")
if self.selected_state == "Running":
show["menu7"].append("menuitem_vm_install_xs_tools")
if self.selected_type == "storage":
show["menu5"] = ["menuitem_pool_new"]
show["menu6"] = ["menuitem_addserver", "menuitem_connectall", "menuitem_disconnectall", "menuitem_newvm"]
show["menu7"] = ["menuitem_importvm2", "menuitem_newvm2"]
show["menu8"] = ["menuitem_stg_new", "menuitem_stg_newvdi", "menuitem_stg_attachvdi"]
show["menu9"] = [""]
show["menu10"] = ["menuitem_options", "menuitem_tools_alerts", "menuitem_migratetool"]
if self.xc_servers[self.selected_host].all['SR'][self.selected_ref]['allowed_operations'].count(
"vdi_create") > 0:
show["menu8"].append("menuitem_stg_default")
if self.selected_type == "template":
show["menu5"] = ["menuitem_pool_new"]
show["menu6"] = ["menuitem_addserver", "menuitem_connectall", "menuitem_disconnectall", "menuitem_newvm"]
show["menu7"] = ["menuitem_importvm2", "menuitem_newvm2"]
show["menu8"] = ["menuitem_stg_new", "", ""]
show["menu9"] = ["menuitem_tpl_newvm", "menuitem_tpl_import", "menuitem_tpl_export", "menuitem_tpl_copy",
"menuitem_tpl_delete"]
show["menu10"] = ["menuitem_options", "menuitem_tools_alerts", "menuitem_migratetool"]
        # For each menu...
        for menu in show:
            # For each child of this menu...
            for child in self.builder.get_object(menu).get_children():
                # Check whether this menu item is listed in "show"
                if show[menu].count(gtk.Buildable.get_name(child)):
                    # If it is listed: enable the menu item
                    child.set_sensitive(True)
                else:
                    # Otherwise: disable the menu item
                    child.set_sensitive(False)
def on_tm_logwindow_activate(self, widget, data=None):
        # TODO: fix it URGENT
        # Note: range(1, 1) is empty, so this demo log-window code never runs.
        for i in range(1, 1):
self.builder.get_object("logwindow").show()
vboxframe = gtk.Frame()
if i % 2 == 0:
vboxframe.set_size_request(500, 100)
else:
vboxframe.set_size_request(500, 80)
vboxchild = gtk.Fixed()
vboxchildlabel1 = gtk.Label()
vboxchildlabel2 = gtk.Label()
vboxchildlabel3 = gtk.Label()
vboxchildlabel4 = gtk.Label()
vboxchildlabel5 = gtk.Label()
#FIXME
#vboxchildprogressbar.set_style(1)
vboxchildlabel1.set_label("Starting ... ")
vboxchildlabel2.set_label("23:28 04/08/2009")
vboxchildlabel3.set_label("Details: problem starting..")
vboxchildlabel4.set_label("Time: 00:00:00")
vboxchild.put(vboxchildlabel1, 25, 12)
vboxchild.put(vboxchildlabel2, 800, 12)
vboxchild.put(vboxchildlabel3, 25, 32)
vboxchild.put(vboxchildlabel4, 25, 52)
# Active task
if i % 2 == 0:
vboxchildcancel = gtk.Button()
vboxchildprogressbar = gtk.ProgressBar()
vboxchildprogressbar.set_size_request(800, 20)
vboxchildprogressbar.set_fraction(float(1/float(i)))
vboxchild.put(vboxchildcancel, 800, 32)
vboxchildcancel.set_label("Cancel")
vboxchildlabel5.set_label("Progress: ")
vboxchild.put(vboxchildprogressbar, 100, 72)
vboxchild.put(vboxchildlabel5, 25, 72)
vboxframe.add(vboxchild)
if i % 2 == 0:
vboxframe.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("red"))
else:
vboxframe.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("black"))
self.builder.get_object("vboxlog").add(vboxframe)
self.builder.get_object("vboxlog").show_all()
| gpl-2.0 | 9,121,037,751,998,276,000 | 46.56056 | 120 | 0.601008 | false |
usirin/koding | go/src/vendor/github.com/caglar10ur/lxc/src/python-lxc/examples/pyconsole-vte.py | 22 | 2202 | #!/usr/bin/env python3
#
# pyconsole-vte: Example program showing use of console functions
# in the lxc python binding
#
# (C) Copyright Oracle. 2013
#
# Authors:
# Dwight Engen <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
#
import gtk
import vte
import lxc
import sys
def gtk_exit_cb(terminal):
gtk.main_quit()
def vte_con(ct, ttynum):
print("Doing console in a VTE widget...")
masterfd = ct.console_getfd(ttynum)
term = vte.Terminal()
term.set_cursor_blinks(True)
term.set_scrollback_lines(1000)
term.connect('eof', gtk_exit_cb)
term.set_pty(masterfd)
term.feed_child('\n')
#term.feed_child('ps aux\n')
vscrollbar = gtk.VScrollbar()
vscrollbar.set_adjustment(term.get_adjustment())
hbox = gtk.HBox()
hbox.pack_start(term)
hbox.pack_start(vscrollbar)
window = gtk.Window()
window.add(hbox)
window.connect('delete-event', lambda window, event: gtk.main_quit())
window.show_all()
gtk.main()
print("Console done")
if __name__ == '__main__':
ttynum = -1
if len(sys.argv) < 2:
sys.exit("Usage: %s container-name [ttynum]" % sys.argv[0])
if len(sys.argv) > 2:
ttynum = int(sys.argv[2])
ct = lxc.Container(sys.argv[1])
print("Container:%s tty:%d" % (ct.name, ttynum))
if not ct.defined:
sys.exit("Container %s not defined" % ct.name)
if not ct.running:
sys.exit("Container %s not running" % ct.name)
vte_con(ct, ttynum)
| apache-2.0 | 8,788,840,997,775,546,000 | 27.230769 | 75 | 0.67257 | false |
ycool/apollo | modules/tools/map_gen/create_traffic_light_from_event.py | 3 | 5178 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This program can create a traffic light protobuf from localization message
"""
import rosbag
import std_msgs
import argparse
import shutil
import os
import rospy
import sys
import math
from std_msgs.msg import String
import common.proto_utils as proto_utils
from common.message_manager import PbMessageManager
from modules.map.proto import map_signal_pb2
from modules.map.proto import map_geometry_pb2
g_message_manager = PbMessageManager()
g_args = None
# mkz vehicle configuration
g_front_to_center = 4.0
g_left_to_center = 1.043 + 0.5
g_right_to_center = 1.043 + 0.5
g_lane_width = 3.7
def create_stop_line(center_x, center_y, heading):
"""create a stop line from center point"""
left_x = center_x + g_left_to_center * math.cos(heading + math.pi / 2.0)
left_y = center_y + g_left_to_center * math.sin(heading + math.pi / 2.0)
right_x = center_x + g_right_to_center * math.cos(heading - math.pi / 2.0)
right_y = center_y + g_right_to_center * math.sin(heading - math.pi / 2.0)
stop_line = map_geometry_pb2.Curve()
curve_segment = stop_line.segment.add()
left_point = curve_segment.line_segment.point.add()
left_point.x = left_x
left_point.y = left_y
center_point = curve_segment.line_segment.point.add()
center_point.x = center_x
center_point.y = center_y
right_point = curve_segment.line_segment.point.add()
right_point.x = right_x
right_point.y = right_y
return stop_line
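# Illustrative note (an addition, not part of the original tool): the stop line
# endpoints are found by offsetting the center point perpendicular to the
# heading.  For example, with heading = 0 (along +x) and g_left_to_center =
# 1.043 + 0.5 = 1.543, the left endpoint is
# (center_x + 1.543*cos(pi/2), center_y + 1.543*sin(pi/2)) = (center_x, center_y + 1.543),
# i.e. 1.543 m to the left of the lane center.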
def create_signal_proto(x, y, heading):
# mkz vehicle configuration
center_x = x + g_front_to_center * math.cos(heading)
center_y = y + g_front_to_center * math.sin(heading)
map_signal = map_signal_pb2.Signal()
map_signal.id.id = "%2.5f_%2.5f" % (center_x, center_y)
map_signal.type = map_signal_pb2.Signal.MIX_3_VERTICAL
# left subsignal
left_subsignal = map_signal.subsignal.add()
left_x = center_x + g_left_to_center * math.cos(heading + math.pi / 2.0)
left_y = center_y + g_left_to_center * math.sin(heading + math.pi / 2.0)
left_subsignal.id.id = "%2.5f_%2.5f" % (left_x, left_y)
left_subsignal.type = map_signal_pb2.Subsignal.CIRCLE
left_subsignal.location.x = left_x
left_subsignal.location.y = left_y
left_subsignal.location.z = 5.0
stopline = map_signal.stop_line.add()
stopline.CopyFrom(create_stop_line(center_x, center_y, heading))
if g_args.extend_to_neighbor_lane:
# add stop line on left lane
left_shift_x = center_x + g_lane_width * math.cos(
heading + math.pi / 2.0)
left_shift_y = center_y + g_lane_width * math.sin(
heading + math.pi / 2.0)
stopline = map_signal.stop_line.add()
stopline.CopyFrom(
create_stop_line(left_shift_x, left_shift_y, heading))
# add stop line on right lane
right_shift_x = center_x + g_lane_width * math.cos(
heading - math.pi / 2.0)
right_shift_y = center_y + g_lane_width * math.sin(
heading - math.pi / 2.0)
stopline = map_signal.stop_line.add()
stopline.CopyFrom(
create_stop_line(right_shift_x, right_shift_y, heading))
return map_signal
def parse_drive_event_file(drive_event_filename, signal_filename):
drive_event = g_message_manager.parse_topic_file("/apollo/drive_event",
drive_event_filename)
if not drive_event:
print("Failed to find localization in %s" % drive_event_filename)
return None
pose = drive_event.location
map_signal = create_signal_proto(pose.position.x, pose.position.y,
pose.heading)
proto_utils.write_pb_to_text_file(map_signal, signal_filename)
return map_signal
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A tool to create traffic light protobuf message from localization.")
parser.add_argument(
"drive_event_filename",
action="store",
help="""the drive event file name""")
parser.add_argument(
"signal_filename", action="store", help="""the signal file name""")
parser.add_argument(
"--extend_to_neighbor_lane",
action="store_true",
help="""the signal file name""")
g_args = parser.parse_args()
parse_drive_event_file(g_args.drive_event_filename, g_args.signal_filename)
| apache-2.0 | -1,153,224,613,218,993,700 | 34.958333 | 89 | 0.637312 | false |
shirishgoyal/crowdsource-platform | crowdsourcing/migrations/0125_boomeranglog.py | 1 | 1066 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-08-03 03:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0124_auto_20160725_2123'),
]
operations = [
migrations.CreateModel(
name='BoomerangLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('min_rating', models.FloatField(default=3.0)),
('rating_updated_at', models.DateTimeField(null=True)),
('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='boomerang_logs', to='crowdsourcing.Project')),
],
options={
'abstract': False,
},
),
]
| mit | 6,136,330,231,292,648,000 | 34.533333 | 154 | 0.586304 | false |
Mellthas/quodlibet | quodlibet/quodlibet/cli.py | 4 | 10325 | # Copyright 2014,2016 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from senf import fsn2text, uri2fsn
from quodlibet import C_, _
from quodlibet.util.dprint import print_, print_e
from quodlibet.remote import Remote, RemoteError
def exit_(status=None, notify_startup=False):
"""Call this to abort the startup before any mainloop starts.
notify_startup needs to be true if QL could potentially have been
called from the desktop file.
"""
if notify_startup:
import gi
gi.require_version("Gdk", "3.0")
from gi.repository import Gdk
Gdk.notify_startup_complete()
raise SystemExit(status)
def is_running():
"""If maybe is another instance running"""
return Remote.remote_exists()
def control(command, arg=None, ignore_error=False):
"""Sends command to the existing instance if possible and exits.
Will print any response it gets to stdout.
    Does not return, except when ignore_error is True and sending
    the command failed.
"""
if not is_running():
if ignore_error:
return
exit_(_("Quod Libet is not running (add '--run' to start it)"),
notify_startup=True)
return
message = command
if arg is not None:
message += " " + arg
try:
response = Remote.send_message(message)
except RemoteError as e:
if ignore_error:
return
exit_(str(e), notify_startup=True)
else:
if response is not None:
print_(response, end="", flush=True)
exit_(notify_startup=True)
def process_arguments(argv):
from quodlibet.util.path import uri_is_valid
from quodlibet import util
from quodlibet import const
actions = []
controls = ["next", "previous", "play", "pause", "play-pause", "stop",
"hide-window", "show-window", "toggle-window",
"focus", "quit", "unfilter", "refresh", "force-previous"]
controls_opt = ["seek", "repeat", "query", "volume", "filter",
"rating", "set-browser", "open-browser", "shuffle",
"queue", "stop-after", "random", "repeat-type",
"shuffle-type", "add-location"]
options = util.OptionParser(
"Quod Libet", const.VERSION,
_("a music library and player"),
_("[option]"))
options.add("print-playing", help=_("Print the playing song and exit"))
options.add("start-playing", help=_("Begin playing immediately"))
options.add("start-hidden", help=_("Don't show any windows on start"))
for opt, help in [
("next", _("Jump to next song")),
("previous",
_("Jump to previous song or restart if near the beginning")),
("force-previous", _("Jump to previous song")),
("play", _("Start playback")),
("pause", _("Pause playback")),
("play-pause", _("Toggle play/pause mode")),
("stop", _("Stop playback")),
("volume-up", _("Turn up volume")),
("volume-down", _("Turn down volume")),
("rating-up", _("Increase rating of playing song by one star")),
("rating-down", _("Decrease rating of playing song by one star")),
("status", _("Print player status")),
("hide-window", _("Hide main window")),
("show-window", _("Show main window")),
("toggle-window", _("Toggle main window visibility")),
("focus", _("Focus the running player")),
("unfilter", _("Remove active browser filters")),
("refresh", _("Refresh and rescan library")),
("list-browsers", _("List available browsers")),
("print-playlist", _("Print the current playlist")),
("print-queue", _("Print the contents of the queue")),
("print-query-text", _("Print the active text query")),
("no-plugins", _("Start without plugins")),
("run", _("Start Quod Libet if it isn't running")),
("quit", _("Exit Quod Libet")),
]:
options.add(opt, help=help)
for opt, help, arg in [
("seek", _("Seek within the playing song"), _("[+|-][HH:]MM:SS")),
("shuffle", _("Set or toggle shuffle mode"), "0|1|t"),
("shuffle-type", _("Set shuffle mode type"), "random|weighted|off"),
("repeat", _("Turn repeat off, on, or toggle it"), "0|1|t"),
("repeat-type", _("Set repeat mode type"), "current|all|one|off"),
("volume", _("Set the volume"), "[+|-]0..100"),
("query", _("Search your audio library"), _("query")),
("play-file", _("Play a file"), C_("command", "filename")),
("rating", _("Set rating of playing song"), "[+|-]0.0..1.0"),
("set-browser", _("Set the current browser"), "BrowserName"),
("stop-after", _("Stop after the playing song"), "0|1|t"),
("open-browser", _("Open a new browser"), "BrowserName"),
("queue", _("Show or hide the queue"), "on|off|t"),
("random", _("Filter on a random value"), C_("command", "tag")),
("filter", _("Filter on a tag value"), _("tag=value")),
("enqueue", _("Enqueue a file or query"), "%s|%s" % (
C_("command", "filename"), _("query"))),
("enqueue-files", _("Enqueue comma-separated files"), "%s[,%s..]" % (
_("filename"), _("filename"))),
("print-query", _("Print filenames of results of query to stdout"),
_("query")),
("unqueue", _("Unqueue a file or query"), "%s|%s" % (
C_("command", "filename"), _("query"))),
("add-location", _("Add a file or directory to the library"),
_("location")),
]:
options.add(opt, help=help, arg=arg)
options.add("sm-config-prefix", arg="dummy")
options.add("sm-client-id", arg="prefix")
options.add("screen", arg="dummy")
def is_vol(str):
if len(str) == 1 and str[0] in '+-':
return True
return is_float(str)
def is_rate(str):
if len(str) == 1 and str[0] in '+-':
return True
return is_float(str)
def is_time(str):
if str[0] not in "+-0123456789":
return False
elif str[0] in "+-":
str = str[1:]
parts = str.split(":")
if len(parts) > 3:
return False
else:
return not (False in [p.isdigit() for p in parts])
def is_float(str):
try:
float(str)
except ValueError:
return False
else:
return True
validators = {
"shuffle": ["0", "1", "t", "on", "off", "toggle"].__contains__,
"shuffle-type": ["random", "weighted", "off", "0"].__contains__,
"repeat": ["0", "1", "t", "on", "off", "toggle"].__contains__,
"repeat-type": ["current", "all", "one", "off", "0"].__contains__,
"volume": is_vol,
"seek": is_time,
"rating": is_rate,
"stop-after": ["0", "1", "t"].__contains__,
}
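    # Illustrative note (an addition, not from the original file): is_time accepts
    # an optional leading "+" or "-" followed by up to three colon-separated digit
    # groups (e.g. "90", "+1:30", "-0:02:15"), while is_vol/is_rate also accept a
    # bare "+" or "-" meaning "step up/down" relative to the current value.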
cmds_todo = []
def queue(*args):
cmds_todo.append(args)
# XXX: to make startup work in case the desktop file isn't passed
# a file path/uri
if argv[-1] == "--play-file":
argv = argv[:-1]
opts, args = options.parse(argv[1:])
for command, arg in opts.items():
if command in controls:
queue(command)
elif command in controls_opt:
if command in validators and not validators[command](arg):
print_e(_("Invalid argument for '%s'.") % command)
print_e(_("Try %s --help.") % fsn2text(argv[0]))
exit_(True, notify_startup=True)
else:
queue(command, arg)
elif command == "status":
queue("status")
elif command == "print-playlist":
queue("dump-playlist")
elif command == "print-queue":
queue("dump-queue")
elif command == "list-browsers":
queue("dump-browsers")
elif command == "volume-up":
queue("volume +")
elif command == "volume-down":
queue("volume -")
elif command == "rating-up":
queue("rating +")
elif command == "rating-down":
queue("rating -")
elif command == "enqueue" or command == "unqueue":
try:
filename = uri2fsn(arg)
except ValueError:
filename = arg
queue(command, filename)
elif command == "enqueue-files":
queue(command, arg)
elif command == "play-file":
if uri_is_valid(arg) and arg.startswith("quodlibet://"):
# TODO: allow handling of URIs without --play-file
queue("uri-received", arg)
else:
try:
filename = uri2fsn(arg)
except ValueError:
filename = arg
                filename = os.path.abspath(util.path.expanduser(filename))
queue("play-file", filename)
elif command == 'add-location':
try:
path = uri2fsn(arg)
except ValueError:
path = arg
            path = os.path.abspath(util.path.expanduser(path))
queue("add-location", path)
elif command == "print-playing":
try:
queue("print-playing", args[0])
except IndexError:
queue("print-playing")
elif command == "print-query":
queue(command, arg)
elif command == "print-query-text":
queue(command)
elif command == "start-playing":
actions.append(command)
elif command == "start-hidden":
actions.append(command)
elif command == "no-plugins":
actions.append(command)
elif command == "run":
actions.append(command)
if cmds_todo:
for cmd in cmds_todo:
control(*cmd, **{"ignore_error": "run" in actions})
else:
# this will exit if it succeeds
control('focus', ignore_error=True)
return actions, cmds_todo
| gpl-2.0 | 4,336,820,668,269,661,700 | 35.22807 | 77 | 0.534722 | false |
luogangyi/bcec-nova | nova/api/openstack/compute/contrib/services.py | 5 | 7900 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova import servicegroup
from nova import utils
authorize = extensions.extension_authorizer('compute', 'services')
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('services')
elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
elem.set('id')
elem.set('binary')
elem.set('host')
elem.set('zone')
elem.set('status')
elem.set('state')
elem.set('updated_at')
elem.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServiceUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('service', selector='service')
root.set('host')
root.set('binary')
root.set('status')
root.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServiceUpdateDeserializer(wsgi.XMLDeserializer):
def default(self, string):
node = xmlutil.safe_minidom_parse_string(string)
service = {}
service_node = self.find_first_child_named(node, 'service')
if service_node is None:
return service
service['host'] = service_node.getAttribute('host')
service['binary'] = service_node.getAttribute('binary')
service['disabled_reason'] = service_node.getAttribute(
'disabled_reason')
return dict(body=service)
class ServiceController(object):
def __init__(self, ext_mgr=None, *args, **kwargs):
self.host_api = compute.HostAPI()
self.servicegroup_api = servicegroup.API()
self.ext_mgr = ext_mgr
def _get_services(self, req):
context = req.environ['nova.context']
authorize(context)
services = self.host_api.service_get_all(
context, set_zones=True)
host = ''
if 'host' in req.GET:
host = req.GET['host']
binary = ''
if 'binary' in req.GET:
binary = req.GET['binary']
if host:
services = [s for s in services if s['host'] == host]
if binary:
services = [s for s in services if s['binary'] == binary]
return services
def _get_service_detail(self, svc, detailed):
alive = self.servicegroup_api.service_is_up(svc)
state = (alive and "up") or "down"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
service_detail = {'binary': svc['binary'], 'host': svc['host'],
'zone': svc['availability_zone'],
'status': active, 'state': state,
'updated_at': svc['updated_at']}
if self.ext_mgr.is_loaded('os-extended-services-delete'):
service_detail['id'] = svc['id']
if detailed:
service_detail['disabled_reason'] = svc['disabled_reason']
return service_detail
def _get_services_list(self, req, detailed):
services = self._get_services(req)
svcs = []
for svc in services:
svcs.append(self._get_service_detail(svc, detailed))
return svcs
def _is_valid_as_reason(self, reason):
try:
utils.check_string_length(reason.strip(), 'Disabled reason',
min_length=1, max_length=255)
except exception.InvalidInput:
return False
return True
@wsgi.response(204)
def delete(self, req, id):
"""Deletes the specified service."""
if not self.ext_mgr.is_loaded('os-extended-services-delete'):
raise webob.exc.HTTPMethodNotAllowed()
context = req.environ['nova.context']
authorize(context)
try:
self.host_api.service_delete(context, id)
except exception.ServiceNotFound:
explanation = _("Service %s not found.") % id
raise webob.exc.HTTPNotFound(explanation=explanation)
@wsgi.serializers(xml=ServicesIndexTemplate)
def index(self, req):
"""Return a list of all running services."""
detailed = self.ext_mgr.is_loaded('os-extended-services')
services = self._get_services_list(req, detailed)
return {'services': services}
@wsgi.deserializers(xml=ServiceUpdateDeserializer)
@wsgi.serializers(xml=ServiceUpdateTemplate)
def update(self, req, id, body):
"""Enable/Disable scheduling for a service."""
context = req.environ['nova.context']
authorize(context)
ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
if id == "enable":
disabled = False
status = "enabled"
elif (id == "disable" or
(id == "disable-log-reason" and ext_loaded)):
disabled = True
status = "disabled"
else:
msg = _("Unknown action")
raise webob.exc.HTTPNotFound(explanation=msg)
try:
host = body['host']
binary = body['binary']
ret_value = {
'service': {
'host': host,
'binary': binary,
'status': status,
},
}
status_detail = {
'disabled': disabled,
'disabled_reason': None,
}
if id == "disable-log-reason":
reason = body['disabled_reason']
if not self._is_valid_as_reason(reason):
msg = _('The string containing the reason for disabling '
'the service contains invalid characters or is '
'too long.')
raise webob.exc.HTTPUnprocessableEntity(detail=msg)
status_detail['disabled_reason'] = reason
ret_value['service']['disabled_reason'] = reason
except (TypeError, KeyError):
msg = _('Invalid attribute in the request')
if 'host' in body and 'binary' in body:
msg = _('Missing disabled reason field')
raise webob.exc.HTTPUnprocessableEntity(detail=msg)
try:
self.host_api.service_update(context, host, binary, status_detail)
except exception.HostBinaryNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return ret_value
class Services(extensions.ExtensionDescriptor):
"""Services support."""
name = "Services"
alias = "os-services"
namespace = "http://docs.openstack.org/compute/ext/services/api/v2"
updated = "2012-10-28T00:00:00-00:00"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('os-services',
ServiceController(self.ext_mgr))
resources.append(resource)
return resources
| apache-2.0 | -668,944,093,662,871,900 | 33.801762 | 79 | 0.587848 | false |
danidomi/fofix | src/Input.py | 3 | 18074 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä                                  #
# 2008 myfingershurt #
# 2008 Glorandwarf #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import pygame
import Log
import Audio
try:
import pygame.midi
haveMidi = True
except ImportError:
haveMidi = False
haveMidi = False  # note: MIDI input support is force-disabled here regardless of the import above
from Task import Task
import Player
from Player import Controls
import Config #MFH
class KeyListener:
def keyPressed(self, key, unicode):
pass
def keyReleased(self, key):
pass
def lostFocus(self):
pass
def exitRequested(self):
pass
class MouseListener:
def mouseButtonPressed(self, button, pos):
pass
def mouseButtonReleased(self, button, pos):
pass
def mouseMoved(self, pos, rel):
pass
class SystemEventListener:
def screenResized(self, size):
pass
def restartRequested(self):
pass
def musicFinished(self):
pass
def quit(self):
pass
MusicFinished = pygame.USEREVENT
class Input(Task):
def __init__(self):
self.logClassInits = Config.get("game", "log_class_inits")
if self.logClassInits == 1:
Log.debug("Input class init (Input.py)...")
Task.__init__(self)
self.mouse = pygame.mouse
self.mouseListeners = []
self.keyListeners = []
self.systemListeners = []
self.priorityKeyListeners = []
self.controls = Controls()
self.activeGameControls = []
self.p2Nav = self.controls.p2Nav
self.type1 = self.controls.type[0]
self.keyCheckerMode = Config.get("game","key_checker_mode")
self.disableKeyRepeat()
self.gameGuitars = 0
self.gameDrums = 0
self.gameMics = 0
self.gameBots = 0
# Initialize joysticks
pygame.joystick.init()
self.joystickNames = {}
self.joystickAxes = {}
self.joystickHats = {}
self.joysticks = [pygame.joystick.Joystick(id) for id in range(pygame.joystick.get_count())]
for j in self.joysticks:
j.init()
self.joystickNames[j.get_id()] = j.get_name()
self.joystickAxes[j.get_id()] = [0] * j.get_numaxes()
self.joystickHats[j.get_id()] = [(0, 0)] * j.get_numhats()
Log.debug("%d joysticks found." % len(self.joysticks))
# Enable music events
Audio.Music.setEndEvent(MusicFinished)
#Audio.Music.setEndEvent() #MFH - no event required?
# Custom key names
self.getSystemKeyName = pygame.key.name
pygame.key.name = self.getKeyName
self.midi = []
if haveMidi:
pygame.midi.init()
for i in range(pygame.midi.get_count()):
interface, name, is_input, is_output, is_opened = pygame.midi.get_device_info(i)
Log.debug("Found MIDI device: %s on %s" % (name, interface))
if not is_input:
Log.debug("MIDI device is not an input device.")
continue
try:
self.midi.append(pygame.midi.Input(i))
Log.debug("Device opened as device number %d." % len(self.midi))
except pygame.midi.MidiException:
Log.error("Error opening device for input.")
if len(self.midi) == 0:
Log.debug("No MIDI input ports found.")
else:
Log.notice("MIDI input support is not available; install at least pygame 1.9 to get it.")
def showMouse(self):
pygame.mouse.set_visible(True)
def hideMouse(self):
pygame.mouse.set_visible(False)
def reloadControls(self):
self.controls = Controls()
def pluginControls(self):
self.gameDrums = 0
self.gameGuitars = 0
self.gameMics = 0
Player.pluginControls(self.activeGameControls)
for i in self.activeGameControls:
if self.controls.type[i] == -1:
self.gameBots += 1
elif self.controls.type[i] in Player.DRUMTYPES:
self.gameDrums += 1
elif self.controls.type[i] in Player.MICTYPES:
self.gameMics += 1
elif self.controls.type[i] in Player.GUITARTYPES:
self.gameGuitars += 1
def getAnalogKill(self, player):
return self.controls.analogKill[self.activeGameControls[player]]
def getAnalogSP(self, player):
return self.controls.analogSP[self.activeGameControls[player]]
def getAnalogSPThresh(self, player):
return self.controls.analogSPThresh[self.activeGameControls[player]]
def getAnalogSPSense(self, player):
return self.controls.analogSPSense[self.activeGameControls[player]]
def getAnalogSlide(self, player):
return self.controls.analogSlide[self.activeGameControls[player]]
def getAnalogFX(self, player):
return self.controls.analogFX[self.activeGameControls[player]] #FIXME: Analog FX
def getTwoChord(self, player):
return self.controls.twoChord[self.activeGameControls[player]]
def disableKeyRepeat(self):
pygame.key.set_repeat(0, 0)
def enableKeyRepeat(self):
pygame.key.set_repeat(300, 30)
def addMouseListener(self, listener):
if not listener in self.mouseListeners:
self.mouseListeners.append(listener)
def removeMouseListener(self, listener):
if listener in self.mouseListeners:
self.mouseListeners.remove(listener)
def addKeyListener(self, listener, priority = False):
if priority:
if not listener in self.priorityKeyListeners:
self.priorityKeyListeners.append(listener)
else:
if not listener in self.keyListeners:
self.keyListeners.append(listener)
def removeKeyListener(self, listener):
if listener in self.keyListeners:
self.keyListeners.remove(listener)
if listener in self.priorityKeyListeners:
self.priorityKeyListeners.remove(listener)
def addSystemEventListener(self, listener):
if not listener in self.systemListeners:
self.systemListeners.append(listener)
def removeSystemEventListener(self, listener):
if listener in self.systemListeners:
self.systemListeners.remove(listener)
def broadcastEvent(self, listeners, function, *args):
for l in reversed(listeners):
if getattr(l, function)(*args):
return True
else:
return False
def broadcastSystemEvent(self, name, *args):
return self.broadcastEvent(self.systemListeners, name, *args)
def encodeMidiButton(self, midi, button):
return 0x40000 + (midi << 8 ) + button
def decodeMidiButton(self, id):
id -= 0x40000
return (id >> 8, id & 0xff)
def encodeJoystickButton(self, joystick, button):
return 0x10000 + (joystick << 8) + button
def encodeJoystickAxis(self, joystick, axis, end):
return 0x20000 + (joystick << 8) + (axis << 4) + end
def encodeJoystickHat(self, joystick, hat, pos):
v = int((pos[1] + 1) * 3 + (pos[0] + 1))
return 0x30000 + (joystick << 8) + (hat << 4) + v
def decodeJoystickButton(self, id):
id -= 0x10000
return (id >> 8, id & 0xff)
def decodeJoystickAxis(self, id):
id -= 0x20000
return (id >> 8, (id >> 4) & 0xf, id & 0xf)
def decodeJoystickHat(self, id):
id -= 0x30000
v = id & 0xf
x, y = (v % 3) - 1, (v / 3) - 1
return (id >> 8, (id >> 4) & 0xf, (x, y))
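  # Illustrative note (an addition, not in the original source): the joystick,
  # hat and MIDI encodings above pack device/control/state into one integer so
  # controller events can masquerade as keyboard keys.  A worked example for the
  # hat encoding, assuming joystick 0, hat 0 and position (1, -1):
  #   encodeJoystickHat(0, 0, (1, -1)): v = (-1+1)*3 + (1+1) = 2 -> 0x30000 + 2 = 0x30002
  #   decodeJoystickHat(0x30002):       v = 2, x = 2 % 3 - 1 = 1, y = 2 / 3 - 1 = -1
  # which round-trips back to (0, 0, (1, -1)).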
#myfingershurt: new function specifically for detecting an analog whammy input:
def getWhammyAxis(self, id):
if id < 0x30000 and id >= 0x20000:
joy, axis, end = self.decodeJoystickAxis(id)
return (True, joy, axis)
else:
return (False, 0, 0)
def getJoysticksUsed(self, keys):
midis = []
joys = []
for id in keys:
if id >= 0x40000:
midi, but = self.decodeMidiButton(id)
if midi not in midis:
midis.append(midi)
elif id >= 0x30000:
joy, axis, pos = self.decodeJoystickHat(id)
if joy not in joys:
joys.append(joy)
elif id >= 0x20000:
joy, axis, end = self.decodeJoystickAxis(id)
if joy not in joys:
joys.append(joy)
elif id >= 0x10000:
joy, but = self.decodeJoystickButton(id)
if joy not in joys:
joys.append(joy)
return [joys, midis]
def getKeyName(self, id):
if id >= 0x40000:
midi, but = self.decodeMidiButton(id)
return "Midi #%d-%d" % (midi + 1, but)
elif id >= 0x30000:
joy, axis, pos = self.decodeJoystickHat(id)
return "Joy #%d, hat %d %s" % (joy + 1, axis, pos)
elif id >= 0x20000:
joy, axis, end = self.decodeJoystickAxis(id)
return "Joy #%d, axis %d %s" % (joy + 1, axis, (end == 1) and "high" or "low")
elif id >= 0x10000:
joy, but = self.decodeJoystickButton(id)
return "Joy #%d, %s" % (joy + 1, chr(ord('A') + but))
return self.getSystemKeyName(id)
def run(self, ticks):
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if not self.broadcastEvent(self.priorityKeyListeners, "keyPressed", event.key, event.unicode):
self.broadcastEvent(self.keyListeners, "keyPressed", event.key, event.unicode)
elif event.type == pygame.KEYUP:
if not self.broadcastEvent(self.priorityKeyListeners, "keyReleased", event.key):
self.broadcastEvent(self.keyListeners, "keyReleased", event.key)
elif event.type == pygame.MOUSEMOTION:
self.broadcastEvent(self.mouseListeners, "mouseMoved", event.pos, event.rel)
elif event.type == pygame.MOUSEBUTTONDOWN:
self.broadcastEvent(self.mouseListeners, "mouseButtonPressed", event.button, event.pos)
elif event.type == pygame.MOUSEBUTTONUP:
self.broadcastEvent(self.mouseListeners, "mouseButtonReleased", event.button, event.pos)
elif event.type == pygame.VIDEORESIZE:
self.broadcastEvent(self.systemListeners, "screenResized", event.size)
elif event.type == pygame.QUIT:
self.broadcastEvent(self.systemListeners, "quit")
elif event.type == pygame.ACTIVEEVENT: # akedrou - catch for pause onLoseFocus
if (event.state == 2 or event.state == 6) and event.gain == 0:
self.broadcastEvent(self.keyListeners, "lostFocus") # as a key event, since Scene clients don't handle system events
elif event.type == MusicFinished:
self.broadcastEvent(self.systemListeners, "musicFinished")
elif event.type == pygame.JOYBUTTONDOWN: # joystick buttons masquerade as keyboard events
id = self.encodeJoystickButton(event.joy, event.button)
if not self.broadcastEvent(self.priorityKeyListeners, "keyPressed", id, u'\x00'):
self.broadcastEvent(self.keyListeners, "keyPressed", id, u'\x00')
elif event.type == pygame.JOYBUTTONUP:
id = self.encodeJoystickButton(event.joy, event.button)
if not self.broadcastEvent(self.priorityKeyListeners, "keyReleased", id):
self.broadcastEvent(self.keyListeners, "keyReleased", id)
elif event.type == pygame.JOYAXISMOTION:
try:
threshold = .8
state = self.joystickAxes[event.joy][event.axis]
keyEvent = None
if event.value > threshold and state != 1:
self.joystickAxes[event.joy][event.axis] = 1
keyEvent = "keyPressed"
args = (self.encodeJoystickAxis(event.joy, event.axis, 1), u'\x00')
state = 1
elif event.value < -threshold and state != -1:
keyEvent = "keyPressed"
args = (self.encodeJoystickAxis(event.joy, event.axis, 0), u'\x00')
state = -1
elif state != 0:
keyEvent = "keyReleased"
args = (self.encodeJoystickAxis(event.joy, event.axis, (state == 1) and 1 or 0), )
state = 0
if keyEvent:
self.joystickAxes[event.joy][event.axis] = state
if not self.broadcastEvent(self.priorityKeyListeners, keyEvent, *args):
self.broadcastEvent(self.keyListeners, keyEvent, *args)
except KeyError:
pass
elif event.type == pygame.JOYHATMOTION:
try:
state = self.joystickHats[event.joy][event.hat]
keyEvent = None
# Stump's PS3 GH3 up-and-down-strumming patch
if event.value != (0, 0) and state != (0, 0):
keyEvent = "keyReleased"
args = (self.encodeJoystickHat(event.joy, event.hat, state), )
state = (0, 0)
pygame.event.post(event)
elif event.value != (0, 0) and state == (0, 0):
self.joystickHats[event.joy][event.hat] = event.value
keyEvent = "keyPressed"
args = (self.encodeJoystickHat(event.joy, event.hat, event.value), u'\x00')
state = event.value
else:
keyEvent = "keyReleased"
args = (self.encodeJoystickHat(event.joy, event.hat, state), )
state = (0, 0)
if keyEvent:
self.joystickHats[event.joy][event.hat] = state
if not self.broadcastEvent(self.priorityKeyListeners, keyEvent, *args):
self.broadcastEvent(self.keyListeners, keyEvent, *args)
except KeyError:
pass
for i, device in enumerate(self.midi):
while True:
data = device.read(1)
if len(data) > 0:
midimsg = data[0][0]
id = self.encodeMidiButton(i, midimsg[1])
#MFH - must check for 0x80 - 0x8F for Note Off events (keyReleased) and 0x90 - 0x9F for Note On events (keyPressed)
noteOn = False
noteOff = False
if (midimsg[0] >= 0x90) and (midimsg[0] <= 0x9F): #note ON range
if midimsg[2] > 0: #velocity > 0, confirmed note on
noteOn = True
else: #velocity is 0 - this is pretty much a note off.
noteOff = True
elif (midimsg[0] >= 0x80) and (midimsg[0] <= 0x8F): #note OFF range
noteOff = True
if noteOn:
if not self.broadcastEvent(self.priorityKeyListeners, "keyPressed", id, u'\x00'):
self.broadcastEvent(self.keyListeners, "keyPressed", id, u'\x00')
elif noteOff:
if not self.broadcastEvent(self.priorityKeyListeners, "keyReleased", id):
self.broadcastEvent(self.keyListeners, "keyReleased", id)
else:
break
# glorandwarf: check that there are no control conflicts #FIXME
def checkControls(self):
if self.controls.isKeyMappingOK() == False:
Log.warn("Conflicting player controls, resetting to defaults")
self.controls.restoreDefaultKeyMappings()
self.reloadControls()
# glorandwarf: sets the new key mapping and checks for a conflict
def setNewKeyMapping(self, section, option, key, control):
return Player.setNewKeyMapping(section, option, key, control)
| gpl-2.0 | -699,863,752,015,638,400 | 40.26484 | 136 | 0.541275 | false |
chenc10/Spark-PAF | dist/examples/src/main/python/cassandra_outputformat.py | 8 | 3287 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from pyspark import SparkContext
"""
Create data in Cassandra first
(following: https://wiki.apache.org/cassandra/GettingStarted)
cqlsh> CREATE KEYSPACE test
... WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
cqlsh> use test;
cqlsh:test> CREATE TABLE users (
... user_id int PRIMARY KEY,
... fname text,
... lname text
... );
> cassandra_outputformat <host> test users 1745 john smith
> cassandra_outputformat <host> test users 1744 john doe
> cassandra_outputformat <host> test users 1746 john smith
cqlsh:test> SELECT * FROM users;
user_id | fname | lname
---------+-------+-------
1745 | john | smith
1744 | john | doe
1746 | john | smith
"""
if __name__ == "__main__":
if len(sys.argv) != 7:
print("""
Usage: cassandra_outputformat <host> <keyspace> <cf> <user_id> <fname> <lname>
Run with example jar:
./bin/spark-submit --driver-class-path /path/to/example/jar \
/path/to/examples/cassandra_outputformat.py <args>
Assumes you have created the following table <cf> in Cassandra already,
running on <host>, in <keyspace>.
cqlsh:<keyspace>> CREATE TABLE <cf> (
... user_id int PRIMARY KEY,
... fname text,
... lname text
... );
""", file=sys.stderr)
exit(-1)
host = sys.argv[1]
keyspace = sys.argv[2]
cf = sys.argv[3]
sc = SparkContext(appName="CassandraOutputFormat")
conf = {"cassandra.output.thrift.address": host,
"cassandra.output.thrift.port": "9160",
"cassandra.output.keyspace": keyspace,
"cassandra.output.partitioner.class": "Murmur3Partitioner",
"cassandra.output.cql": "UPDATE " + keyspace + "." + cf + " SET fname = ?, lname = ?",
"mapreduce.output.basename": cf,
"mapreduce.outputformat.class": "org.apache.cassandra.hadoop.cql3.CqlOutputFormat",
"mapreduce.job.output.key.class": "java.util.Map",
"mapreduce.job.output.value.class": "java.util.List"}
key = {"user_id": int(sys.argv[4])}
sc.parallelize([(key, sys.argv[5:])]).saveAsNewAPIHadoopDataset(
conf=conf,
keyConverter="org.apache.spark.examples.pythonconverters.ToCassandraCQLKeyConverter",
valueConverter="org.apache.spark.examples.pythonconverters.ToCassandraCQLValueConverter")
sc.stop()
| apache-2.0 | 769,814,413,818,522,500 | 36.352273 | 98 | 0.652571 | false |
scality/cinder | cinder/tests/unit/objects/test_volume.py | 1 | 10553 | # Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import exception
from cinder import objects
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
class TestVolume(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_get_by_id(self, volume_get, volume_glance_metadata_get):
db_volume = fake_volume.fake_db_volume()
volume_get.return_value = db_volume
volume = objects.Volume.get_by_id(self.context, 1)
self._compare(self, db_volume, volume)
volume_get.assert_called_once_with(self.context, 1)
@mock.patch('cinder.db.sqlalchemy.api.model_query')
def test_get_by_id_no_existing_id(self, model_query):
pf = model_query().options().options().options().options().options()
pf.filter_by().first.return_value = None
self.assertRaises(exception.VolumeNotFound,
objects.Volume.get_by_id, self.context, 123)
@mock.patch('cinder.db.volume_create')
def test_create(self, volume_create):
db_volume = fake_volume.fake_db_volume()
volume_create.return_value = db_volume
volume = objects.Volume(context=self.context)
volume.create()
self.assertEqual(db_volume['id'], volume.id)
@mock.patch('cinder.db.volume_update')
def test_save(self, volume_update):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.save()
volume_update.assert_called_once_with(self.context, volume.id,
{'display_name': 'foobar'})
@mock.patch('cinder.db.volume_metadata_update',
return_value={'key1': 'value1'})
@mock.patch('cinder.db.volume_update')
def test_save_with_metadata(self, volume_update, metadata_update):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.metadata = {'key1': 'value1'}
self.assertEqual({'display_name': 'foobar',
'metadata': {'key1': 'value1'}},
volume.obj_get_changes())
volume.save()
volume_update.assert_called_once_with(self.context, volume.id,
{'display_name': 'foobar'})
metadata_update.assert_called_once_with(self.context, volume.id,
{'key1': 'value1'}, True)
@mock.patch('cinder.db.volume_admin_metadata_update',
return_value={'key1': 'value1'})
@mock.patch('cinder.db.volume_update')
def test_save_with_admin_metadata(self, volume_update,
admin_metadata_update):
# Test with no admin context
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.admin_metadata = {'key1': 'value1'}
volume.save()
self.assertFalse(admin_metadata_update.called)
# Test with admin context
admin_context = context.RequestContext(self.user_id, self.project_id,
is_admin=True)
volume = objects.Volume._from_db_object(admin_context,
objects.Volume(), db_volume)
volume.admin_metadata = {'key1': 'value1'}
volume.save()
admin_metadata_update.assert_called_once_with(
admin_context, volume.id, {'key1': 'value1'}, True)
@mock.patch('cinder.db.volume_destroy')
def test_destroy(self, volume_destroy):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.destroy()
self.assertTrue(volume_destroy.called)
admin_context = volume_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
def test_obj_fields(self):
volume = objects.Volume(context=self.context, id=2, _name_id=2)
self.assertEqual(['name', 'name_id'], volume.obj_extra_fields)
self.assertEqual('volume-2', volume.name)
self.assertEqual('2', volume.name_id)
def test_obj_field_previous_status(self):
volume = objects.Volume(context=self.context,
previous_status='backing-up')
self.assertEqual('backing-up', volume.previous_status)
@mock.patch('cinder.db.volume_metadata_delete')
def test_delete_metadata_key(self, metadata_delete):
volume = objects.Volume(self.context, id=1)
volume.metadata = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual({}, volume._orig_metadata)
volume.delete_metadata_key('key2')
self.assertEqual({'key1': 'value1'}, volume.metadata)
metadata_delete.assert_called_once_with(self.context, '1', 'key2')
@mock.patch('cinder.db.volume_metadata_get')
@mock.patch('cinder.db.volume_admin_metadata_get')
@mock.patch('cinder.objects.volume_type.VolumeType.get_by_id')
@mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.'
'get_all_by_volume_id')
def test_obj_load_attr(self, mock_va_get_all_by_vol, mock_vt_get_by_id,
mock_admin_metadata_get, mock_metadata_get):
volume = objects.Volume._from_db_object(
self.context, objects.Volume(), fake_volume.fake_db_volume())
# Test metadata lazy-loaded field
metadata = {'foo': 'bar'}
mock_metadata_get.return_value = metadata
self.assertEqual(metadata, volume.metadata)
mock_metadata_get.assert_called_once_with(self.context, volume.id)
# Test volume_type lazy-loaded field
volume_type = objects.VolumeType(context=self.context, id=5)
mock_vt_get_by_id.return_value = volume_type
self.assertEqual(volume_type, volume.volume_type)
mock_vt_get_by_id.assert_called_once_with(self.context,
volume.volume_type_id)
# Test volume_attachment lazy-loaded field
va_objs = [objects.VolumeAttachment(context=self.context, id=i)
for i in [3, 4, 5]]
va_list = objects.VolumeAttachmentList(context=self.context,
objects=va_objs)
mock_va_get_all_by_vol.return_value = va_list
self.assertEqual(va_list, volume.volume_attachment)
mock_va_get_all_by_vol.assert_called_once_with(self.context, volume.id)
# Test admin_metadata lazy-loaded field - user context
adm_metadata = {'bar': 'foo'}
mock_admin_metadata_get.return_value = adm_metadata
self.assertEqual({}, volume.admin_metadata)
self.assertFalse(mock_admin_metadata_get.called)
# Test admin_metadata lazy-loaded field - admin context
adm_context = self.context.elevated()
volume = objects.Volume._from_db_object(adm_context, objects.Volume(),
fake_volume.fake_db_volume())
adm_metadata = {'bar': 'foo'}
mock_admin_metadata_get.return_value = adm_metadata
self.assertEqual(adm_metadata, volume.admin_metadata)
mock_admin_metadata_get.assert_called_once_with(adm_context, volume.id)
class TestVolumeList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
@mock.patch('cinder.db.volume_get_all')
def test_get_all(self, volume_get_all, volume_glance_metadata_get):
db_volume = fake_volume.fake_db_volume()
volume_get_all.return_value = [db_volume]
volumes = objects.VolumeList.get_all(self.context,
mock.sentinel.marker,
mock.sentinel.limit,
mock.sentinel.sort_key,
mock.sentinel.sort_dir)
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_host')
def test_get_by_host(self, get_all_by_host):
db_volume = fake_volume.fake_db_volume()
get_all_by_host.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_host(
self.context, 'fake-host')
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_group')
def test_get_by_group(self, get_all_by_group):
db_volume = fake_volume.fake_db_volume()
get_all_by_group.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_group(
self.context, 'fake-host')
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_project')
def test_get_by_project(self, get_all_by_project):
db_volume = fake_volume.fake_db_volume()
get_all_by_project.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_project(
self.context, mock.sentinel.project_id, mock.sentinel.marker,
mock.sentinel.limit, mock.sentinel.sorted_keys,
mock.sentinel.sorted_dirs, mock.sentinel.filters)
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
| apache-2.0 | 499,737,695,942,082,300 | 46.111607 | 79 | 0.608453 | false |
turbokongen/home-assistant | tests/components/switch/test_device_trigger.py | 3 | 8224 | """The test for switch device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.switch import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a switch."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert triggers == expected_triggers
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a switch trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
for trigger in triggers:
capabilities = await async_get_device_automation_capabilities(
hass, "trigger", trigger
)
assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "turn_off device - {} - on - off - None".format(
ent1.entity_id
)
hass.states.async_set(ent1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "turn_on device - {} - off - on - None".format(
ent1.entity_id
)
async def test_if_fires_on_state_change_with_for(hass, calls):
"""Test for triggers firing with delay."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert calls[0].data["some"] == "turn_off device - {} - on - off - 0:00:05".format(
ent1.entity_id
)
| apache-2.0 | -5,122,585,954,553,566,000 | 33.995745 | 87 | 0.482004 | false |
tensorflow/tensorflow | tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py | 14 | 2435 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""XLA LocalClient interface for interacting with TPUs via the TPU driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
# Import xla_client to load shared C++ extensions (just CompileOptions at the
# time of writing).
from tensorflow.compiler.xla.python import xla_client # pylint: disable=unused-import
from tensorflow.compiler.xla.python.tpu_driver.client import tpu_client_extension as _tpu_client
class TpuBackend(object):
"""XLA backend implemented using the Tpu driver API."""
# Cache the backends to prevent double driver initializations.
_local_backend = None
@staticmethod
def create(worker=None, force=False):
"""Constructs a Cloud TPU backend."""
# `force` == True will skip caching any backends (if applicable) and will
# always try to create a new client.
if worker is None:
raise ValueError(
'Failed to create TpuBackend. The `worker` parameter must not be '
'`None`. Use `local` to connect to a local TPU or '
'`grpc://host:port` to connect to a remote TPU.')
if worker == 'local' or 'local://' in worker:
# We usually want to cache for local backends to prevent double
# initialization, except where `force` == True.
if worker == 'local':
worker = 'local://'
if force:
return _tpu_client.TpuClient.Get(worker)
if TpuBackend._local_backend is None:
logging.info('Starting the local TPU driver.')
TpuBackend._local_backend = _tpu_client.TpuClient.Get(worker)
return TpuBackend._local_backend
else:
# We do not cache for non-local backends.
return _tpu_client.TpuClient.Get(worker)
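  # Example usage (illustrative; the gRPC address below is a placeholder, not a
  # value defined in this module):
  #
  #   backend = TpuBackend.create(worker='local')
  #   backend = TpuBackend.create(worker='grpc://10.0.0.2:8470', force=True)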
| apache-2.0 | 992,158,606,129,420,400 | 40.271186 | 96 | 0.685421 | false |
KureFM/Xenon | src/packages/rsa/pkcs1.py | 75 | 13170 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for PKCS#1 version 1.5 encryption and signing
This module implements certain functionality from PKCS#1 version 1.5. For a
very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
At least 8 bytes of random padding is used when encrypting a message. This makes
these methods much more secure than the ones in the ``rsa`` module.
WARNING: this module leaks information when decryption or verification fails.
The exceptions that are raised contain the Python traceback information, which
can be used to deduce where in the process the failure occurred. DO NOT PASS
SUCH INFORMATION to your users.
'''
import hashlib
import os
from rsa._compat import b
from rsa import common, transform, core, varblock
# ASN.1 codes that describe the hash algorithm used.
HASH_ASN1 = {
'MD5': b('\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
'SHA-1': b('\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
'SHA-256': b('\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
'SHA-384': b('\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
'SHA-512': b('\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40'),
}
HASH_METHODS = {
'MD5': hashlib.md5,
'SHA-1': hashlib.sha1,
'SHA-256': hashlib.sha256,
'SHA-384': hashlib.sha384,
'SHA-512': hashlib.sha512,
}
class CryptoError(Exception):
'''Base class for all exceptions in this module.'''
class DecryptionError(CryptoError):
'''Raised when decryption fails.'''
class VerificationError(CryptoError):
'''Raised when verification fails.'''
def _pad_for_encryption(message, target_length):
r'''Pads the message for encryption, returning the padded message.
:return: 00 02 RANDOM_DATA 00 MESSAGE
>>> block = _pad_for_encryption('hello', 16)
>>> len(block)
16
>>> block[0:2]
'\x00\x02'
>>> block[-6:]
'\x00hello'
'''
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
# Get random padding
padding = b('')
padding_length = target_length - msglength - 3
# We remove 0-bytes, so we'll end up with less padding than we've asked for,
# so keep adding data until we're at the correct length.
while len(padding) < padding_length:
needed_bytes = padding_length - len(padding)
        # Always read at least 5 bytes more than we need, and trim off the rest
# after removing the 0-bytes. This increases the chance of getting
# enough bytes, especially when needed_bytes is small
new_padding = os.urandom(needed_bytes + 5)
new_padding = new_padding.replace(b('\x00'), b(''))
padding = padding + new_padding[:needed_bytes]
assert len(padding) == padding_length
return b('').join([b('\x00\x02'),
padding,
b('\x00'),
message])
def _pad_for_signing(message, target_length):
r'''Pads the message for signing, returning the padded message.
The padding is always a repetition of FF bytes.
:return: 00 01 PADDING 00 MESSAGE
>>> block = _pad_for_signing('hello', 16)
>>> len(block)
16
>>> block[0:2]
'\x00\x01'
>>> block[-6:]
'\x00hello'
>>> block[2:-6]
'\xff\xff\xff\xff\xff\xff\xff\xff'
'''
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
padding_length = target_length - msglength - 3
return b('').join([b('\x00\x01'),
padding_length * b('\xff'),
b('\x00'),
message])
def encrypt(message, pub_key):
'''Encrypts the given message using PKCS#1 v1.5
:param message: the message to encrypt. Must be a byte string no longer than
``k-11`` bytes, where ``k`` is the number of bytes needed to encode
the ``n`` component of the public key.
:param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
:raise OverflowError: when the message is too large to fit in the padded
block.
>>> from rsa import key, common
>>> (pub_key, priv_key) = key.newkeys(256)
>>> message = 'hello'
>>> crypto = encrypt(message, pub_key)
The crypto text should be just as long as the public key 'n' component:
>>> len(crypto) == common.byte_size(pub_key.n)
True
'''
keylength = common.byte_size(pub_key.n)
padded = _pad_for_encryption(message, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def decrypt(crypto, priv_key):
r'''Decrypts the given message using PKCS#1 v1.5
The decryption is considered 'failed' when the resulting cleartext doesn't
start with the bytes 00 02, or when the 00 byte between the padding and
the message cannot be found.
:param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
:param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
:raise DecryptionError: when the decryption fails. No details are given as
to why the code thinks the decryption fails, as this would leak
information about the private key.
>>> import rsa
>>> (pub_key, priv_key) = rsa.newkeys(256)
It works with strings:
>>> crypto = encrypt('hello', pub_key)
>>> decrypt(crypto, priv_key)
'hello'
And with binary data:
>>> crypto = encrypt('\x00\x00\x00\x00\x01', pub_key)
>>> decrypt(crypto, priv_key)
'\x00\x00\x00\x00\x01'
Altering the encrypted information will *likely* cause a
:py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
:py:func:`rsa.sign`.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
code the exception occurred, and thus leaks information about the key.
It's only a tiny bit of information, but every bit makes cracking the
keys easier.
>>> crypto = encrypt('hello', pub_key)
>>> crypto = crypto[0:5] + 'X' + crypto[6:] # change a byte
>>> decrypt(crypto, priv_key)
Traceback (most recent call last):
...
DecryptionError: Decryption failed
'''
blocksize = common.byte_size(priv_key.n)
encrypted = transform.bytes2int(crypto)
decrypted = core.decrypt_int(encrypted, priv_key.d, priv_key.n)
cleartext = transform.int2bytes(decrypted, blocksize)
# If we can't find the cleartext marker, decryption failed.
if cleartext[0:2] != b('\x00\x02'):
raise DecryptionError('Decryption failed')
# Find the 00 separator between the padding and the message
try:
sep_idx = cleartext.index(b('\x00'), 2)
except ValueError:
raise DecryptionError('Decryption failed')
return cleartext[sep_idx+1:]
def sign(message, priv_key, hash):
'''Signs the message with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param message: the message to sign. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash: the hash method used on the message. Use 'MD5', 'SHA-1',
'SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
'''
# Get the ASN1 code for this hash method
if hash not in HASH_ASN1:
raise ValueError('Invalid hash method: %s' % hash)
asn1code = HASH_ASN1[hash]
# Calculate the hash
hash = _hash(message, hash)
# Encrypt the hash with the private key
cleartext = asn1code + hash
keylength = common.byte_size(priv_key.n)
padded = _pad_for_signing(cleartext, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, priv_key.d, priv_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def verify(message, signature, pub_key):
'''Verifies that the signature matches the message.
The hash method is detected automatically from the signature.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:raise VerificationError: when the signature doesn't match the message.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.VerificationError` exception. It shows where in
the code the exception occurred, and thus leaks information about the
key. It's only a tiny bit of information, but every bit makes cracking
the keys easier.
'''
blocksize = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, blocksize)
# If we can't find the signature marker, verification failed.
if clearsig[0:2] != b('\x00\x01'):
raise VerificationError('Verification failed')
# Find the 00 separator between the padding and the payload
try:
sep_idx = clearsig.index(b('\x00'), 2)
except ValueError:
raise VerificationError('Verification failed')
# Get the hash and the hash method
(method_name, signature_hash) = _find_method_hash(clearsig[sep_idx+1:])
message_hash = _hash(message, method_name)
# Compare the real hash to the hash in the signature
if message_hash != signature_hash:
raise VerificationError('Verification failed')
return True
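# Minimal signing round-trip sketch built only from the functions above; the
# 512-bit key size is an arbitrary example, chosen so the SHA-256 ASN.1 header
# plus digest still fits within the 11-byte padding overhead.
def _example_sign_and_verify():
    '''Sign a short message with a fresh key pair and verify the signature.'''
    from rsa import key
    (pub_key, priv_key) = key.newkeys(512)
    message = b('hello')
    signature = sign(message, priv_key, 'SHA-256')
    assert verify(message, signature, pub_key)
    return signature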
def _hash(message, method_name):
'''Returns the message digest.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param method_name: the hash method, must be a key of
:py:const:`HASH_METHODS`.
'''
if method_name not in HASH_METHODS:
raise ValueError('Invalid hash method: %s' % method_name)
method = HASH_METHODS[method_name]
hasher = method()
if hasattr(message, 'read') and hasattr(message.read, '__call__'):
# read as 1K blocks
for block in varblock.yield_fixedblocks(message, 1024):
hasher.update(block)
else:
# hash the message object itself.
hasher.update(message)
return hasher.digest()
def _find_method_hash(method_hash):
'''Finds the hash method and the hash itself.
:param method_hash: ASN1 code for the hash method concatenated with the
hash itself.
:return: tuple (method, hash) where ``method`` is the used hash method, and
``hash`` is the hash itself.
    :raise VerificationError: when the hash method cannot be found
'''
for (hashname, asn1code) in HASH_ASN1.items():
if not method_hash.startswith(asn1code):
continue
return (hashname, method_hash[len(asn1code):])
raise VerificationError('Verification failed')
__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
'DecryptionError', 'VerificationError', 'CryptoError']
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count and count % 100 == 0:
print('%i times' % count)
print('Doctests done')
| mit | -7,070,300,595,042,206,000 | 32.680307 | 97 | 0.644848 | false |
CloudServer/cinder | cinder/volume/drivers/zfssa/zfssarest.py | 2 | 39133 | # Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance Proxy
"""
import json
from oslo_log import log
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume.drivers.zfssa import restclient
from cinder.volume.drivers.zfssa import webdavclient
LOG = log.getLogger(__name__)
class ZFSSAApi(object):
"""ZFSSA API proxy class"""
def __init__(self):
self.host = None
self.url = None
self.rclient = None
def __del__(self):
if self.rclient and self.rclient.islogin():
self.rclient.logout()
def _is_pool_owned(self, pdata):
"""Returns True if the pool's owner is the same as the host."""
svc = '/api/system/v1/version'
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error getting version: '
'svc: %(svc)s.'
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'svc': svc,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
vdata = json.loads(ret.data)
return vdata['version']['asn'] == pdata['pool']['asn'] and \
vdata['version']['nodename'] == pdata['pool']['owner']
def set_host(self, host, timeout=None):
self.host = host
self.url = "https://" + self.host + ":215"
self.rclient = restclient.RestClientURL(self.url, timeout=timeout)
def login(self, auth_str):
"""Login to the appliance"""
if self.rclient and not self.rclient.islogin():
self.rclient.login(auth_str)
def get_pool_stats(self, pool):
"""Get pool stats.
Get space available and total properties of a pool
returns (avail, total).
"""
svc = '/api/storage/v1/pools/' + pool
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting Pool Stats: '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.InvalidVolume(reason=exception_msg)
val = json.loads(ret.data)
if not self._is_pool_owned(val):
LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
'by %(host)s.'),
{'pool': pool, 'host': self.host})
raise exception.InvalidInput(reason=pool)
avail = val['pool']['usage']['available']
total = val['pool']['usage']['total']
return avail, total
def create_project(self, pool, project, compression=None, logbias=None):
"""Create a project on a pool.
Check first whether the pool exists.
"""
self.verify_pool(pool)
svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/storage/v1/pools/' + pool + '/projects'
arg = {
'name': project
}
if compression and compression != '':
arg.update({'compression': compression})
if logbias and logbias != '':
arg.update({'logbias': logbias})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Project: '
'%(project)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_initiator(self, initiator, alias, chapuser=None,
chapsecret=None):
"""Create an iSCSI initiator."""
svc = '/api/san/v1/iscsi/initiators/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/initiators'
arg = {
'initiator': initiator,
'alias': alias
}
if chapuser and chapuser != '' and chapsecret and chapsecret != '':
arg.update({'chapuser': chapuser,
'chapsecret': chapsecret})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Initiator: '
'%(initiator)s on '
'Alias: %(alias)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def add_to_initiatorgroup(self, initiator, initiatorgroup):
"""Add an iSCSI initiator to initiatorgroup"""
svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/initiator-groups'
arg = {
'name': initiatorgroup,
'initiators': [initiator]
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Adding Initiator: '
'%(initiator)s on group'
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
else:
val = json.loads(ret.data)
inits = val['group']['initiators']
if inits is None:
exception_msg = (_('Error Getting Initiators: '
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
if initiator in inits:
return
inits.append(initiator)
svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
arg = {
'initiators': inits
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Adding Initiator: '
'%(initiator)s on group'
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_target(self, alias, interfaces=None, tchapuser=None,
tchapsecret=None):
"""Create an iSCSI target.
:param interfaces: an array with network interfaces
:param tchapuser, tchapsecret: target's chapuser and chapsecret
:returns: target iqn
"""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/targets'
arg = {
'alias': alias
}
if tchapuser and tchapuser != '' and tchapsecret and \
tchapsecret != '':
arg.update({'targetchapuser': tchapuser,
'targetchapsecret': tchapsecret,
'auth': 'chap'})
if interfaces is not None and len(interfaces) > 0:
arg.update({'interfaces': interfaces})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Target: '
'%(alias)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['target']['iqn']
def get_target(self, alias):
"""Get an iSCSI target iqn."""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting Target: '
'%(alias)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['target']['iqn']
def add_to_targetgroup(self, iqn, targetgroup):
"""Add an iSCSI target to targetgroup."""
svc = '/api/san/v1/iscsi/target-groups/' + targetgroup
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svccrt = '/api/san/v1/iscsi/target-groups'
arg = {
'name': targetgroup,
'targets': [iqn]
}
ret = self.rclient.post(svccrt, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating TargetGroup: '
'%(targetgroup)s with'
'IQN: %(iqn)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'targetgroup': targetgroup,
'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
return
arg = {
'targets': [iqn]
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Adding to TargetGroup: '
'%(targetgroup)s with'
'IQN: %(iqn)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'targetgroup': targetgroup,
'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_pool(self, pool):
"""Checks whether pool exists."""
svc = '/api/storage/v1/pools/' + pool
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying Pool: '
'%(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_project(self, pool, project):
"""Checks whether project exists."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Project: %(project)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_initiator(self, iqn):
"""Check whether initiator iqn exists."""
svc = '/api/san/v1/iscsi/initiators/' + iqn
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Initiator: %(iqn)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
                             % {'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_target(self, alias):
"""Check whether target alias exists."""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Target: %(alias)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
"""Create a LUN.
        specs - contains volume properties (e.g. blocksize, compression).
"""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns'
arg = {
'name': lun,
'volsize': volsize,
'targetgroup': targetgroup,
'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'
}
if specs:
arg.update(specs)
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Volume: %(lun)s '
'Size: %(size)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'size': volsize,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val
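    # Example `specs` payload (illustrative; the key names follow the properties
    # mentioned in the docstring and used elsewhere in this file, and the values
    # shown are placeholders):
    #
    #   specs = {'blocksize': 8192, 'compression': 'lzjb', 'logbias': 'latency'}
    #   api.create_lun('pool0', 'project0', 'volume-0001', 1073741824,
    #                  'tgt-grp-cinder', specs)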
def get_lun(self, pool, project, lun):
"""return iscsi lun properties."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + "/luns/" + lun
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Volume: %(lun)s on '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
ret = {
'guid': val['lun']['lunguid'],
'number': val['lun']['assignednumber'],
'initiatorgroup': val['lun']['initiatorgroup'],
'size': val['lun']['volsize'],
'nodestroy': val['lun']['nodestroy']
}
if 'origin' in val['lun']:
ret.update({'origin': val['lun']['origin']})
return ret
def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup):
"""Set the initiatorgroup property of a LUN."""
if initiatorgroup == '':
initiatorgroup = 'com.sun.ms.vss.hg.maskAll'
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun
arg = {
'initiatorgroup': initiatorgroup
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: '
'%(initiatorgroup)s Pool: %(pool)s Project: '
'%(project)s Return code: %(ret.status)d Message: '
'%(ret.data)s.'),
{'lun': lun,
'initiatorgroup': initiatorgroup,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
def delete_lun(self, pool, project, lun):
"""delete iscsi lun."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
'Project: %(project)s Return code: %(ret.status)d '
'Message: %(ret.data)s.'),
{'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
def create_snapshot(self, pool, project, lun, snapshot):
"""create snapshot."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots'
arg = {
'name': snapshot
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Snapshot: %(snapshot)s on'
'Volume: %(lun)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def delete_snapshot(self, pool, project, lun, snapshot):
"""delete snapshot."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_('Error Deleting '
'Snapshot: %(snapshot)s on '
'Volume: %(lun)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def clone_snapshot(self, pool, project, lun, snapshot, clone):
"""clone snapshot."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone'
arg = {
'project': project,
'share': clone,
'nodestroy': True
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Cloning '
'Snapshot: %(snapshot)s on '
'Volume: %(lun)s of '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def set_lun_props(self, pool, project, lun, **kargs):
"""set lun properties."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun
if kargs is None:
return
ret = self.rclient.put(svc, kargs)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Setting props '
'Props: %(props)s on '
'Volume: %(lun)s of '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'props': kargs,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def has_clones(self, pool, project, lun, snapshot):
"""Checks whether snapshot has clones or not."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Snapshot: %(snapshot)s on '
'Volume: %(lun)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['snapshot']['numclones'] != 0
def get_initiator_initiatorgroup(self, initiator):
"""Returns the initiator group of the initiator."""
groups = []
svc = "/api/san/v1/iscsi/initiator-groups"
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
msg = _('Error getting initiator groups.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
val = json.loads(ret.data)
for initiator_group in val['groups']:
if initiator in initiator_group['initiators']:
groups.append(initiator_group["name"])
if len(groups) == 0:
LOG.debug("Initiator group not found. Attaching volume to "
"default initiator group.")
groups.append('default')
return groups
class ZFSSANfsApi(ZFSSAApi):
"""ZFSSA API proxy class for NFS driver"""
projects_path = '/api/storage/v1/pools/%s/projects'
project_path = projects_path + '/%s'
shares_path = project_path + '/filesystems'
share_path = shares_path + '/%s'
share_snapshots_path = share_path + '/snapshots'
share_snapshot_path = share_snapshots_path + '/%s'
services_path = '/api/service/v1/services/'
def __init__(self, *args, **kwargs):
super(ZFSSANfsApi, self).__init__(*args, **kwargs)
self.webdavclient = None
def set_webdav(self, https_path, auth_str):
self.webdavclient = webdavclient.ZFSSAWebDAVClient(https_path,
auth_str)
def verify_share(self, pool, project, share):
"""Checks whether the share exists"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'share: %(share)s on '
'Project: %(project)s and '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'share': share,
'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot(self, pool, project, share, snapshot):
"""create snapshot of a share"""
svc = self.share_snapshots_path % (pool, project, share)
arg = {
'name': snapshot
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Snapshot: %(snapshot)s on'
'share: %(share)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def delete_snapshot(self, pool, project, share, snapshot):
"""delete snapshot of a share"""
svc = self.share_snapshot_path % (pool, project, share, snapshot)
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_('Error Deleting '
'Snapshot: %(snapshot)s on '
'Share: %(share)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot_of_volume_file(self, src_file="", dst_file=""):
src_file = '.zfs/snapshot/' + src_file
return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
method='COPY')
def delete_snapshot_of_volume_file(self, src_file=""):
return self.webdavclient.request(src_file=src_file, method='DELETE')
def create_volume_from_snapshot_file(self, src_file="", dst_file="",
method='COPY'):
return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
method=method)
def _change_service_state(self, service, state=''):
svc = self.services_path + service + '/' + state
ret = self.rclient.put(svc)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Verifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('%(service)s service state: %(data)s',
{'service': service, 'data': data})
status = 'online' if state == 'enable' else 'disabled'
if data['<status>'] != status:
exception_msg = (_('%(service)s Service is not %(status)s '
'on storage appliance: %(host)s')
% {'service': service,
'status': status,
'host': self.host})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def enable_service(self, service):
self._change_service_state(service, state='enable')
self.verify_service(service)
def disable_service(self, service):
self._change_service_state(service, state='disable')
self.verify_service(service, status='offline')
def verify_service(self, service, status='online'):
"""Checks whether a service is online or not"""
svc = self.services_path + service
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
if data['<status>'] != status:
exception_msg = (_('%(service)s Service is not %(status)s '
'on storage appliance: %(host)s')
% {'service': service,
'status': status,
'host': self.host})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def modify_service(self, service, edit_args=None):
"""Edit service properties"""
if edit_args is None:
edit_args = {}
svc = self.services_path + service
ret = self.rclient.put(svc, edit_args)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error modifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('Modify %(service)s service '
'return data: %(data)s',
{'service': service,
'data': data})
def create_share(self, pool, project, share, args):
"""Create a share in the specified pool and project"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = self.shares_path % (pool, project)
args.update({'name': share})
ret = self.rclient.post(svc, args)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Share: %(name)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'name': share,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
else:
LOG.debug('Editing properties of a pre-existing share')
ret = self.rclient.put(svc, args)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error editing share: '
'%(share)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'share': share,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def get_share(self, pool, project, share):
"""return share properties"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Share: %(share)s on '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['filesystem']
| apache-2.0 | 8,506,971,684,102,821,000 | 43.118377 | 79 | 0.450157 | false |
kubeflow/kfp-tekton | sdk/python/kfp_tekton/_client.py | 1 | 2688 | # Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from datetime import datetime
from typing import Mapping, Callable
import kfp
from .compiler import TektonCompiler
class TektonClient(kfp.Client):
"""Tekton API Client for Kubeflow Pipelines."""
def create_run_from_pipeline_func(self,
pipeline_func: Callable,
arguments: Mapping[str, str],
run_name=None,
experiment_name=None,
pipeline_conf: kfp.dsl.PipelineConf = None,
namespace=None):
"""Runs pipeline on Kubernetes cluster with Kubeflow Pipelines Tekton backend.
This command compiles the pipeline function, creates or gets an experiment and
submits the pipeline for execution.
:param pipeline_func: A function that describes a pipeline by calling components
and composing them into execution graph.
:param arguments: Arguments to the pipeline function provided as a dict.
:param run_name: Optional. Name of the run to be shown in the UI.
:param experiment_name: Optional. Name of the experiment to add the run to.
:param pipeline_conf: Optional. Pipeline configuration.
:param namespace: kubernetes namespace where the pipeline runs are created.
      For single-user deployments, leave it as None;
      For multi-user deployments, pass a namespace where the user is authorized.
:return: RunPipelineResult
"""
# TODO: Check arguments against the pipeline function
pipeline_name = pipeline_func.__name__
run_name = run_name or pipeline_name + ' ' + datetime.now().strftime('%Y-%m-%d %H-%M-%S')
try:
(_, pipeline_package_path) = tempfile.mkstemp(suffix='.zip')
TektonCompiler().compile(pipeline_func, pipeline_package_path, pipeline_conf=pipeline_conf)
return self.create_run_from_pipeline_package(pipeline_package_path, arguments,
run_name, experiment_name, namespace)
finally:
os.remove(pipeline_package_path)
| apache-2.0 | -3,953,413,530,550,328,300 | 41.666667 | 97 | 0.667783 | false |
IrregularShed/newoldtv | pal.py | 1 | 8583 | #!/usr/bin/env python
# The GIMP PAL plugin - PAL effect plugin for The GIMP.
# Copyright (C) 2009 Dave Jeffery <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gimpfu import *
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
# Constants definitions
PAL_WIDTH = 720
PAL_HEIGHT = 576
PAL_BLACK = (0, 0, 0)
PAL_S = 1.0
PAL_D = 0.5
def pal(img, layer, add_border, add_interlace, pal_y_scale,
down_interpol, up_interpol):
"""Simulates PAL encoding on an image. To do this it uses the source
image to make two new images: a high resolution "luminance" image and a
low resolution "chrominance" image. Then it combines these two images to
give you the finished result.
The method used by this plug-in is explained in more detail on my blog:
http://kecskebak.blogspot.com/2009/09/tapeheads-revisited.html
There are some additional features such as adding the borders to the edge
of the image, a crude 'interlace' effect and a choice of PAL encoding
systems.
PAL-D averages out the colour of adjacent lines, so to simulate this I
simply halve the vertical resolution when creating the luminance and
chrominance images.
    The plug-in scales the source image to 720 x 576 before it begins."""
gimp.context_push()
img.undo_group_start()
# Scale image to PAL size (720 x 576)
scale_to_pal(img, down_interpol, up_interpol)
# Add PAL border if required
if add_border:
layer = add_pal_border(img, layer)
# Work out image scaling
width = layer.width
height = layer.height
chrominance_width = width / 3
chrominance_height = height * pal_y_scale
luminance_width = width - chrominance_width
luminance_height = height * pal_y_scale
# Luminance layer
luminance_layer = layer
# Create a chrominance layer
chrominance_layer = layer.copy(1)
img.add_layer(chrominance_layer)
pdb.gimp_layer_set_mode(chrominance_layer, ADDITION_MODE)
# Apply levels to luminance layer
adjust_levels(luminance_layer, 76, 150, 29)
# Apply levels to chrominance layer
adjust_levels(chrominance_layer, 179, 105, 226)
# Scale luminance layer
scale(luminance_layer, luminance_width, luminance_height,
down_interpol)
scale(luminance_layer, width, height, up_interpol)
# Scale chrominance layer
scale(chrominance_layer, chrominance_width, chrominance_height,
down_interpol)
scale(chrominance_layer, width, height, up_interpol)
# Merge chrominance and luminance layers
layer = pdb.gimp_image_merge_down(img, chrominance_layer, CLIP_TO_IMAGE)
# Add interlace effect if required
if add_interlace:
layer = add_interlace_effect(img, layer)
img.undo_group_end()
gimp.context_pop()
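# Worked example of the scaling arithmetic above: for a 720 x 576 frame with
# PAL-D selected (pal_y_scale = 0.5),
#   chrominance_width  = 720 / 3   = 240
#   chrominance_height = 576 * 0.5 = 288
#   luminance_width    = 720 - 240 = 480
#   luminance_height   = 576 * 0.5 = 288
# so colour survives at roughly a third of the luminance's horizontal detail,
# and both layers are scaled back to 720 x 576 before being merged.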
def scale(layer, new_width, new_height, interpolation):
local_origin = False
pdb.gimp_layer_scale_full(layer, new_width, new_height, local_origin,
interpolation)
def is_pal_sized(image):
return image.width == PAL_WIDTH and image.height == PAL_HEIGHT
def scale_to_pal(image, down_interpol, up_interpol):
"""Scales image to standard PAL size - 720 x 576 pixels.
If the image is bigger, use the user specified downscaling method,
otherwise use the user specified upscaling method."""
# Check to make sure image is not 720 x 576 already
if is_pal_sized(image):
return
# Choose which interpolation method to use to scale image
if image.width > PAL_WIDTH:
interpolation = down_interpol
else:
interpolation = up_interpol
# Scale image
pdb.gimp_image_scale_full(image, PAL_WIDTH, PAL_HEIGHT, interpolation)
def adjust_levels(layer, r, g, b):
low_input = 0
high_input = 255
gamma = 1.0
low_output = 0
pdb.gimp_levels(layer , HISTOGRAM_RED, low_input, high_input,
gamma, low_output, r)
pdb.gimp_levels(layer , HISTOGRAM_GREEN, low_input, high_input,
gamma, low_output, g)
pdb.gimp_levels(layer , HISTOGRAM_BLUE, low_input, high_input,
gamma, low_output, b)
def add_pal_border(image, layer):
"""Adds a black border to the area of layer that would not contain PAL
picture information on a real PAL screen grab.
The position parameter is important, so the black border is added to the
correct layer.
Returns the new layer created as a result of the border layer being merged
with layer."""
# Create a new layer above layer
opacity = 100
position = pdb.gimp_image_get_layer_position(image, layer)
new_layer = pdb.gimp_layer_new(image, PAL_WIDTH, PAL_HEIGHT, RGBA_IMAGE,
"PAL border", opacity, NORMAL_MODE)
pdb.gimp_image_add_layer(image, new_layer, position)
gimp.set_foreground(PAL_BLACK)
pdb.gimp_edit_fill(new_layer, FOREGROUND_FILL)
# Cut out hole from new layer
BOR_WIDTH = 702
BOR_HEIGHT = 576
BOR_X = 9
BOR_Y = 0
feather = True
feather_radius = 2.0
pdb.gimp_rect_select(image, BOR_X, BOR_Y, BOR_WIDTH, BOR_HEIGHT,
CHANNEL_OP_REPLACE, feather, feather_radius)
pdb.gimp_edit_cut(new_layer)
pdb.gimp_selection_none(image)
# Merge layer with current image
return pdb.gimp_image_merge_down(image, new_layer, CLIP_TO_IMAGE)
def add_interlace_effect(image, layer):
"""Creates an interlace style effect.
Returns the new layer that results from adding the effect."""
# Create a new duplicate layer above the existing one
add_alpha = True
position = pdb.gimp_image_get_layer_position(image, layer)
new_layer = pdb.gimp_layer_copy(layer, add_alpha)
pdb.gimp_image_add_layer(image, new_layer, position)
# Shift screen lines on the new layer
dy = 0
dx = 1
feather = False
feather_radius = 0.0
line_width = new_layer.width
line_height = 1
start_x = 0
for start_y in range(0, new_layer.height, 2):
pdb.gimp_rect_select(image, start_x, start_y, line_width, line_height,
CHANNEL_OP_REPLACE, feather, feather_radius)
float_layer = pdb.gimp_selection_float(new_layer, dx, dy)
pdb.gimp_floating_sel_anchor(float_layer)
pdb.gimp_selection_none(image)
# Apply Gaussian Blue to new layer
horizontal = 1.0
vertical = 1.0
method = 1 # No constants available IIR = 0, RLE = 1
pdb.plug_in_gauss(image, new_layer, horizontal, vertical, method)
# Merge the new layer with the original layer
return pdb.gimp_image_merge_down(image, new_layer, CLIP_TO_IMAGE)
register(
"python-fu-pal",
N_("Makes image look PAL encoded."),
"Makes image look PAL encoded.",
"Dave Jeffery",
"Dave Jeffery",
"2009",
N_("_PAL..."),
"RGB*, GRAY*",
[
(PF_IMAGE, "image", _("Input image"), None),
(PF_DRAWABLE, "drawable", _("Input drawable"), None),
(PF_TOGGLE, "add_border", _("Add PAL border?"), True),
(PF_TOGGLE, "add_interlace", _("Add interlace effect?"), True),
(PF_RADIO, "pal_y_scale", _("PAL version"), 1.0,
((_("PAL-S (Simple PAL)"), PAL_S),
(_("PAL-D"), PAL_D))),
(PF_RADIO, "down_interpol", _("Down-scaling interpolation method"), 2,
((_("None"), INTERPOLATION_NONE),
(_("Linear"), INTERPOLATION_LINEAR),
(_("Cubic"), INTERPOLATION_CUBIC),
(_("Sinc Lanczos"), INTERPOLATION_LANCZOS))),
(PF_RADIO, "up_interpol", _("Up-scaling interpolation method"), 3,
((_("None"), INTERPOLATION_NONE),
(_("Linear"), INTERPOLATION_LINEAR),
(_("Cubic"), INTERPOLATION_CUBIC),
(_("Sinc Lanczos"), INTERPOLATION_LANCZOS)))
],
[],
pal,
menu="<Image>/Filters/Artistic",
domain=("gimp20-python", gimp.locale_directory)
)
main()
| gpl-3.0 | 2,651,493,329,082,831,000 | 33.748988 | 79 | 0.654433 | false |
calancha/DIRAC | ConfigurationSystem/Client/CSAPI.py | 1 | 22652 | ########################################################################
# $HeadURL$
########################################################################
__RCSID__ = "$Id$"
import types
from DIRAC.ConfigurationSystem.private.Modificator import Modificator
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities import List
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security import Locations
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
class CSAPI:
def __init__( self ):
"""
Initialization function
"""
self.__csModified = False
self.__baseSecurity = "/Registry"
self.__userDN = ''
self.__userGroup = ''
self.__rpcClient = None
self.__csMod = None
self.__initialized = S_ERROR( "Not initialized" )
self.initialize()
if not self.__initialized[ 'OK' ]:
gLogger.error( self.__initialized )
def __getProxyID( self ):
proxyLocation = Locations.getProxyLocation()
if not proxyLocation:
gLogger.error( "No proxy found!" )
return False
chain = X509Chain()
if not chain.loadProxyFromFile( proxyLocation ):
gLogger.error( "Can't read proxy!", proxyLocation )
return False
retVal = chain.getIssuerCert()
if not retVal[ 'OK' ]:
gLogger.error( "Can't parse proxy!", retVal[ 'Message' ] )
return False
idCert = retVal[ 'Value' ]
self.__userDN = idCert.getSubjectDN()[ 'Value' ]
self.__userGroup = chain.getDIRACGroup()[ 'Value' ]
return True
def __getCertificateID( self ):
certLocation = Locations.getHostCertificateAndKeyLocation()
if not certLocation:
gLogger.error( "No certificate found!" )
return False
chain = X509Chain()
retVal = chain.loadChainFromFile( certLocation[ 0 ] )
if not retVal[ 'OK' ]:
gLogger.error( "Can't parse certificate!", retVal[ 'Message' ] )
return False
idCert = chain.getIssuerCert()[ 'Value' ]
self.__userDN = idCert.getSubjectDN()[ 'Value' ]
self.__userGroup = 'host'
return True
def initialize( self ):
if self.__initialized[ 'OK' ]:
return self.__initialized
if not gConfig.useServerCertificate():
res = self.__getProxyID()
else:
res = self.__getCertificateID()
if not res:
self.__initialized = S_ERROR( "Cannot locate client credentials" )
return self.__initialized
retVal = gConfig.getOption( "/DIRAC/Configuration/MasterServer" )
if not retVal[ 'OK' ]:
self.__initialized = S_ERROR( "Master server is not known. Is everything initialized?" )
return self.__initialized
self.__rpcClient = RPCClient( gConfig.getValue( "/DIRAC/Configuration/MasterServer", "" ) )
self.__csMod = Modificator( self.__rpcClient, "%s - %s" % ( self.__userGroup, self.__userDN ) )
retVal = self.downloadCSData()
if not retVal[ 'OK' ]:
self.__initialized = S_ERROR( "Can not download the remote cfg. Is everything initialized?" )
return self.__initialized
self.__initialized = S_OK()
return self.__initialized
def downloadCSData( self ):
if not self.__csMod:
return S_ERROR( "CSAPI not yet initialized" )
result = self.__csMod.loadFromRemote()
if not result[ 'OK' ]:
return result
self.__csModified = False
self.__csMod.updateGConfigurationData()
return S_OK()
def listUsers( self , group = False ):
if not self.__initialized[ 'OK' ]:
return self.__initialized
if not group:
return S_OK( self.__csMod.getSections( "%s/Users" % self.__baseSecurity ) )
else:
users = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if not users:
return S_OK( [] )
else:
return S_OK( List.fromChar( users ) )
def listHosts( self ):
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__csMod.getSections( "%s/Hosts" % self.__baseSecurity ) )
def describeUsers( self, users = False ):
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__describeEntity( users ) )
def describeHosts( self, hosts = False ):
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__describeEntity( hosts, True ) )
def __describeEntity( self, mask, hosts = False ):
if hosts:
csSection = "%s/Hosts" % self.__baseSecurity
else:
csSection = "%s/Users" % self.__baseSecurity
if mask:
entities = [ entity for entity in self.__csMod.getSections( csSection ) if entity in mask ]
else:
entities = self.__csMod.getSections( csSection )
entitiesDict = {}
for entity in entities:
entitiesDict[ entity ] = {}
for option in self.__csMod.getOptions( "%s/%s" % ( csSection, entity ) ):
entitiesDict[ entity ][ option ] = self.__csMod.getValue( "%s/%s/%s" % ( csSection, entity, option ) )
if not hosts:
groupsDict = self.describeGroups()[ 'Value' ]
entitiesDict[ entity ][ 'Groups' ] = []
for group in groupsDict:
if 'Users' in groupsDict[ group ] and entity in groupsDict[ group ][ 'Users' ]:
entitiesDict[ entity ][ 'Groups' ].append( group )
entitiesDict[ entity ][ 'Groups' ].sort()
return entitiesDict
def listGroups( self ):
"""
List all groups
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__csMod.getSections( "%s/Groups" % self.__baseSecurity ) )
def describeGroups( self, mask = False ):
"""
List all groups that are in the mask (or all if no mask) with their properties
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
groups = [ group for group in self.__csMod.getSections( "%s/Groups" % self.__baseSecurity ) if not mask or ( mask and group in mask ) ]
groupsDict = {}
for group in groups:
groupsDict[ group ] = {}
for option in self.__csMod.getOptions( "%s/Groups/%s" % ( self.__baseSecurity, group ) ):
groupsDict[ group ][ option ] = self.__csMod.getValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, group, option ) )
if option in ( "Users", "Properties" ):
groupsDict[ group ][ option ] = List.fromChar( groupsDict[ group ][ option ] )
return S_OK( groupsDict )
def deleteUsers( self, users ):
"""
    Delete one or more users. Accepts either a single username string or a list of usernames.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if type( users ) == types.StringType:
users = [ users ]
usersData = self.describeUsers( users )['Value']
for username in users:
if not username in usersData:
gLogger.warn( "User %s does not exist" )
continue
userGroups = usersData[ username ][ 'Groups' ]
for group in userGroups:
self.__removeUserFromGroup( group, username )
gLogger.info( "Deleted user %s from group %s" % ( username, group ) )
self.__csMod.removeSection( "%s/Users/%s" % ( self.__baseSecurity, username ) )
gLogger.info( "Deleted user %s" % username )
self.__csModified = True
return S_OK( True )
def __removeUserFromGroup( self, group, username ):
"""
Remove user from a group
"""
usersInGroup = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if usersInGroup != None:
userList = List.fromChar( usersInGroup, "," )
userPos = userList.index( username )
userList.pop( userPos )
self.__csMod.setOptionValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ), ",".join( userList ) )
def __addUserToGroup( self, group, username ):
"""
Add user to a group
"""
usersInGroup = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if usersInGroup != None:
userList = List.fromChar( usersInGroup )
if username not in userList:
userList.append( username )
self.__csMod.setOptionValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ), ",".join( userList ) )
else:
gLogger.warn( "User %s is already in group %s" % ( username, group ) )
def addUser( self, username, properties ):
"""
Add a user to the cs
- username
- properties is a dict with keys:
- DN
- groups
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
for prop in ( "DN", "Groups" ):
if prop not in properties:
gLogger.error( "Missing %s property for user %s" % ( prop, username ) )
return S_OK( False )
if username in self.listUsers()['Value']:
gLogger.error( "User %s is already registered" % username )
return S_OK( False )
groups = self.listGroups()['Value']
for userGroup in properties[ 'Groups' ]:
if not userGroup in groups:
gLogger.error( "User %s group %s is not a valid group" % ( username, userGroup ) )
return S_OK( False )
self.__csMod.createSection( "%s/Users/%s" % ( self.__baseSecurity, username ) )
for prop in properties:
if prop == "Groups":
continue
self.__csMod.setOptionValue( "%s/Users/%s/%s" % ( self.__baseSecurity, username, prop ), properties[ prop ] )
for userGroup in properties[ 'Groups' ]:
gLogger.info( "Added user %s to group %s" % ( username, userGroup ) )
self.__addUserToGroup( userGroup, username )
gLogger.info( "Registered user %s" % username )
self.__csModified = True
return S_OK( True )
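  # Example call shape (illustrative; the DN, group and extra attribute below are
  # placeholders rather than values defined in this module):
  #
  #   csAPI = CSAPI()
  #   csAPI.addUser( "jdoe", { "DN": "/DC=org/DC=example/CN=John Doe",
  #                            "Groups": [ "user" ],
  #                            "Email": "[email protected]" } )
  #
  # Note that addUser() only updates the in-memory copy of the configuration
  # (it sets self.__csModified); pushing the change back to the master server
  # is a separate step.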
def modifyUser( self, username, properties, createIfNonExistant = False ):
"""
Modify a user
- username
- properties is a dict with keys:
- DN
- Groups
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
modifiedUser = False
userData = self.describeUsers( [ username ] )['Value']
if username not in userData:
if createIfNonExistant:
gLogger.info( "Registering user %s" % username )
return self.addUser( username, properties )
gLogger.error( "User %s is not registered" % username )
return S_OK( False )
for prop in properties:
if prop == "Groups":
continue
prevVal = self.__csMod.getValue( "%s/Users/%s/%s" % ( self.__baseSecurity, username, prop ) )
if not prevVal or prevVal != properties[ prop ]:
gLogger.info( "Setting %s property for user %s to %s" % ( prop, username, properties[ prop ] ) )
self.__csMod.setOptionValue( "%s/Users/%s/%s" % ( self.__baseSecurity, username, prop ), properties[ prop ] )
modifiedUser = True
if 'Groups' in properties:
groups = self.listGroups()['Value']
for userGroup in properties[ 'Groups' ]:
if not userGroup in groups:
gLogger.error( "User %s group %s is not a valid group" % ( username, userGroup ) )
return S_OK( False )
groupsToBeDeletedFrom = []
groupsToBeAddedTo = []
for prevGroup in userData[ username ][ 'Groups' ]:
if prevGroup not in properties[ 'Groups' ]:
groupsToBeDeletedFrom.append( prevGroup )
modifiedUser = True
for newGroup in properties[ 'Groups' ]:
if newGroup not in userData[ username ][ 'Groups' ]:
groupsToBeAddedTo.append( newGroup )
modifiedUser = True
for group in groupsToBeDeletedFrom:
self.__removeUserFromGroup( group, username )
gLogger.info( "Removed user %s from group %s" % ( username, group ) )
for group in groupsToBeAddedTo:
self.__addUserToGroup( group, username )
gLogger.info( "Added user %s to group %s" % ( username, group ) )
if modifiedUser:
gLogger.info( "Modified user %s" % username )
self.__csModified = True
else:
gLogger.info( "Nothing to modify for user %s" % username )
return S_OK( True )
def addGroup( self, groupname, properties ):
"""
Add a group to the cs
- groupname
- properties is a dict with keys:
- Users
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if groupname in self.listGroups()['Value']:
gLogger.error( "Group %s is already registered" % groupname )
return S_OK( False )
self.__csMod.createSection( "%s/Groups/%s" % ( self.__baseSecurity, groupname ) )
for prop in properties:
self.__csMod.setOptionValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, groupname, prop ), properties[ prop ] )
gLogger.info( "Registered group %s" % groupname )
self.__csModified = True
return S_OK( True )
def modifyGroup( self, groupname, properties, createIfNonExistant = False ):
"""
    Modify a group
- groupname
- properties is a dict with keys:
- Users
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
modifiedGroup = False
groupData = self.describeGroups( [ groupname ] )['Value']
if groupname not in groupData:
if createIfNonExistant:
gLogger.info( "Registering group %s" % groupname )
return self.addGroup( groupname, properties )
gLogger.error( "Group %s is not registered" % groupname )
return S_OK( False )
for prop in properties:
prevVal = self.__csMod.getValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, groupname, prop ) )
if not prevVal or prevVal != properties[ prop ]:
gLogger.info( "Setting %s property for group %s to %s" % ( prop, groupname, properties[ prop ] ) )
self.__csMod.setOptionValue( "%s/Groups/%s/%s" % ( self.__baseSecurity, groupname, prop ), properties[ prop ] )
modifiedGroup = True
if modifiedGroup:
gLogger.info( "Modified group %s" % groupname )
self.__csModified = True
else:
gLogger.info( "Nothing to modify for group %s" % groupname )
return S_OK( True )
def addHost( self, hostname, properties ):
"""
Add a host to the cs
- hostname
- properties is a dict with keys:
- DN
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
for prop in ( "DN", ):
if prop not in properties:
gLogger.error( "Missing %s property for host %s" % ( prop, hostname ) )
return S_OK( False )
if hostname in self.listHosts()['Value']:
gLogger.error( "Host %s is already registered" % hostname )
return S_OK( False )
self.__csMod.createSection( "%s/Hosts/%s" % ( self.__baseSecurity, hostname ) )
for prop in properties:
self.__csMod.setOptionValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ), properties[ prop ] )
gLogger.info( "Registered host %s" % hostname )
self.__csModified = True
return S_OK( True )
def modifyHost( self, hostname, properties, createIfNonExistant = False ):
"""
    Modify a host
- hostname
- properties is a dict with keys:
- DN
- Properties
- <extra params>
Returns True/False
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
modifiedHost = False
hostData = self.describeHosts( [ hostname ] )['Value']
if hostname not in hostData:
if createIfNonExistant:
gLogger.info( "Registering host %s" % hostname )
return self.addHost( hostname, properties )
gLogger.error( "Host %s is not registered" % hostname )
return S_OK( False )
for prop in properties:
prevVal = self.__csMod.getValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ) )
if not prevVal or prevVal != properties[ prop ]:
gLogger.info( "Setting %s property for host %s to %s" % ( prop, hostname, properties[ prop ] ) )
self.__csMod.setOptionValue( "%s/Hosts/%s/%s" % ( self.__baseSecurity, hostname, prop ), properties[ prop ] )
modifiedHost = True
if modifiedHost:
gLogger.info( "Modified host %s" % hostname )
self.__csModified = True
else:
gLogger.info( "Nothing to modify for host %s" % hostname )
return S_OK( True )
def syncUsersWithCFG( self, usersCFG ):
"""
Sync users with the cfg contents. Usernames have to be sections containing
DN, Groups, and extra properties as parameters
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
done = True
for user in usersCFG.listSections():
properties = {}
propList = usersCFG[ user ].listOptions()
for prop in propList:
if prop == "Groups":
properties[ prop ] = List.fromChar( usersCFG[ user ][ prop ] )
else:
properties[ prop ] = usersCFG[ user ][ prop ]
      result = self.modifyUser( user, properties, createIfNonExistant = True )
      if not result[ 'OK' ] or not result[ 'Value' ]:
        done = False
return S_OK( done )
def sortUsersAndGroups( self ):
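    """ Sort the Users and Hosts sections alphabetically and sort the users
        list of every group
    """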
self.__csMod.sortAlphabetically( "%s/Users" % self.__baseSecurity )
self.__csMod.sortAlphabetically( "%s/Hosts" % self.__baseSecurity )
for group in self.__csMod.getSections( "%s/Groups" % self.__baseSecurity ):
usersOptionPath = "%s/Groups/%s/Users" % ( self.__baseSecurity, group )
users = self.__csMod.getValue( usersOptionPath )
usersList = List.fromChar( users )
usersList.sort()
sortedUsers = ", ".join( usersList )
if users != sortedUsers:
self.__csMod.setOptionValue( usersOptionPath, sortedUsers )
def checkForUnexistantUsersInGroups( self ):
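    """ Remove from each group's Users option any username that is not
        registered in the Users section
    """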
allUsers = self.__csMod.getSections( "%s/Users" % self.__baseSecurity )
allGroups = self.__csMod.getSections( "%s/Groups" % self.__baseSecurity )
for group in allGroups:
usersInGroup = self.__csMod.getValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ) )
if usersInGroup:
filteredUsers = []
usersInGroup = List.fromChar( usersInGroup )
for user in usersInGroup:
if user in allUsers:
filteredUsers.append( user )
self.__csMod.setOptionValue( "%s/Groups/%s/Users" % ( self.__baseSecurity, group ),
",".join( filteredUsers ) )
def commitChanges( self, sortUsers = True ):
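    """ Commit the accumulated changes to the CS server, after dropping unknown
        users from groups and (optionally) sorting users and hosts
    """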
if not self.__initialized[ 'OK' ]:
return self.__initialized
if self.__csModified:
self.checkForUnexistantUsersInGroups()
if sortUsers:
self.sortUsersAndGroups()
retVal = self.__csMod.commit()
if not retVal[ 'OK' ]:
gLogger.error( "Can't commit new configuration data", "%s" % retVal[ 'Message' ] )
return retVal
return self.downloadCSData()
return S_OK()
def commit( self ):
""" Commit the accumulated changes to the CS server
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if self.__csModified:
retVal = self.__csMod.commit()
if not retVal[ 'OK' ]:
gLogger.error( "Can't commit new configuration data", "%s" % retVal[ 'Message' ] )
return retVal
return self.downloadCSData()
return S_OK()
def mergeFromCFG( self, cfg ):
""" Merge the internal CFG data with the input
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.mergeFromCFG( cfg )
self.__csModified = True
return S_OK()
def modifyValue( self, optionPath, newValue ):
"""Modify an existing value at the specified options path.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
prevVal = self.__csMod.getValue( optionPath )
if not prevVal:
return S_ERROR( 'Trying to set %s to %s but option does not exist' % ( optionPath, newValue ) )
gLogger.verbose( "Changing %s from \n%s \nto \n%s" % ( optionPath, prevVal, newValue ) )
self.__csMod.setOptionValue( optionPath, newValue )
self.__csModified = True
return S_OK( 'Modified %s' % optionPath )
def setOption( self, optionPath, optionValue ):
"""Create an option at the specified path.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.setOptionValue( optionPath, optionValue )
self.__csModified = True
return S_OK( 'Created new option %s = %s' % ( optionPath, optionValue ) )
def setOptionComment( self, optionPath, comment ):
"""Create an option at the specified path.
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.setComment( optionPath, comment )
self.__csModified = True
return S_OK( 'Set option comment %s : %s' % ( optionPath, comment ) )
def delOption( self, optionPath ):
""" Delete an option
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if not self.__csMod.removeOption( optionPath ):
return S_ERROR( "Couldn't delete option %s" % optionPath )
self.__csModified = True
return S_OK( 'Deleted option %s' % ( optionPath ) )
def createSection( self, sectionPath, comment = "" ):
""" Create a new section
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.createSection( sectionPath )
self.__csModified = True
if comment:
self.__csMod.setComment( sectionPath, comment )
return S_OK()
def delSection( self, sectionPath ):
""" Delete a section
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
if not self.__csMod.removeSection( sectionPath ):
return S_ERROR( "Could not delete section %s " % sectionPath )
self.__csModified = True
return S_OK()
def mergeCFGUnderSection( self, sectionPath, cfg ):
""" Merge the given cfg under a certain section
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
result = self.createSection( sectionPath )
if not result[ 'OK' ]:
return result
if not self.__csMod.mergeSectionFromCFG( sectionPath, cfg ):
return S_ERROR( "Could not merge cfg into section %s" % sectionPath )
self.__csModified = True
return S_OK()
def mergeWithCFG( self, cfg ):
""" Merge the given cfg with the current config
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
self.__csMod.mergeFromCFG( cfg )
self.__csModified = True
return S_OK()
def getCurrentCFG( self ):
""" Get the current CFG as it is
"""
if not self.__initialized[ 'OK' ]:
return self.__initialized
return S_OK( self.__csMod.getCFG() )
| gpl-3.0 | -5,572,035,235,934,651,000 | 36.816361 | 139 | 0.614118 | false |
Vauxoo/server-tools | excel_import_export/models/xlsx_report.py | 2 | 2264 | # Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
class XLSXReport(models.AbstractModel):
""" Common class for xlsx reporting wizard """
_name = 'xlsx.report'
_description = 'Excel Report AbstractModel'
name = fields.Char(
string='File Name',
readonly=True,
size=500,
)
data = fields.Binary(
string='File',
readonly=True,
)
template_id = fields.Many2one(
'xlsx.template',
string='Template',
required=True,
ondelete='cascade',
domain=lambda self: self._context.get('template_domain', []),
)
choose_template = fields.Boolean(
string='Allow Choose Template',
default=False,
)
state = fields.Selection(
[('choose', 'Choose'),
('get', 'Get')],
default='choose',
help="* Choose: wizard show in user selection mode"
"\n* Get: wizard show results from user action",
)
@api.model
def default_get(self, fields):
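        # Limit the selectable templates to the domain passed in the context,
        # fail early if no (or an empty) template is found, and preselect the
        # template when exactly one matches.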
template_domain = self._context.get('template_domain', [])
templates = self.env['xlsx.template'].search(template_domain)
if not templates:
raise ValidationError(_('No template found'))
defaults = super(XLSXReport, self).default_get(fields)
for template in templates:
if not template.datas:
raise ValidationError(_('No file in %s') % (template.name,))
defaults['template_id'] = len(templates) == 1 and templates.id or False
return defaults
@api.multi
def report_xlsx(self):
self.ensure_one()
Export = self.env['xlsx.export']
out_file, out_name = \
Export.export_xlsx(self.template_id, self._name, self.id)
self.write({'state': 'get', 'data': out_file, 'name': out_name})
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_mode': 'form',
'view_type': 'form',
'res_id': self.id,
'views': [(False, 'form')],
'target': 'new',
}
| agpl-3.0 | -9,115,504,572,418,933,000 | 31.811594 | 79 | 0.569346 | false |
nkrinner/nova | nova/scheduler/filters/aggregate_multitenancy_isolation.py | 2 | 1983 | # Copyright (c) 2011-2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
"""Isolate tenants in specific aggregates."""
# Aggregate data and tenant do not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
"""If a host is in an aggregate that has the metadata key
"filter_tenant_id" it can only create instances from that tenant(s).
A host can be in different aggregates.
If a host doesn't belong to an aggregate with the metadata key
"filter_tenant_id" it can create instances from all tenants.
"""
spec = filter_properties.get('request_spec', {})
props = spec.get('instance_properties', {})
tenant_id = props.get('project_id')
context = filter_properties['context'].elevated()
metadata = db.aggregate_metadata_get_by_host(context, host_state.host,
key="filter_tenant_id")
if metadata != {}:
if tenant_id not in metadata["filter_tenant_id"]:
LOG.debug("%s fails tenant id on aggregate", host_state)
return False
return True
| apache-2.0 | 6,100,110,404,066,099,000 | 39.469388 | 78 | 0.666162 | false |
quizlet/grpc | tools/buildgen/plugins/expand_bin_attrs.py | 14 | 1646 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Buildgen expand binary attributes plugin.
This fills in any optional attributes.
"""
def mako_plugin(dictionary):
"""The exported plugin code for expand_filegroups.
The list of libs in the build.yaml file can contain "filegroups" tags.
These refer to the filegroups in the root object. We will expand and
merge filegroups on the src, headers and public_headers properties.
"""
targets = dictionary.get('targets')
default_platforms = ['windows', 'posix', 'linux', 'mac']
for tgt in targets:
tgt['flaky'] = tgt.get('flaky', False)
tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))
tgt['boringssl'] = tgt.get('boringssl', False)
tgt['zlib'] = tgt.get('zlib', False)
tgt['ares'] = tgt.get('ares', False)
tgt['gtest'] = tgt.get('gtest', False)
libs = dictionary.get('libs')
for lib in libs:
lib['boringssl'] = lib.get('boringssl', False)
lib['zlib'] = lib.get('zlib', False)
lib['ares'] = lib.get('ares', False)
| apache-2.0 | -7,936,551,944,769,618,000 | 34.021277 | 75 | 0.696233 | false |
IITBinterns13/edx-platform-dev | lms/djangoapps/instructor_task/tasks_helper.py | 5 | 19005 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from time import time
from sys import exc_info
from traceback import format_exc
from celery import current_task
from celery.utils.log import get_task_logger
from celery.signals import worker_process_init
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.db import transaction
from dogapi import dog_stats_api
from xmodule.modulestore.django import modulestore
import mitxmako.middleware as middleware
from track.views import task_track
from courseware.models import StudentModule
from courseware.model_data import ModelDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_task.models import InstructorTask, PROGRESS
# define different loggers for use within tasks and on client side
TASK_LOG = get_task_logger(__name__)
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
def initialize_mako(sender=None, conf=None, **kwargs):
"""
Get mako templates to work on celery worker server's worker thread.
The initialization of Mako templating is usually done when Django is
initializing middleware packages as part of processing a server request.
When this is run on a celery worker server, no such initialization is
called.
To make sure that we don't load this twice (just in case), we look for the
result: the defining of the lookup paths for templates.
"""
if 'main' not in middleware.lookup:
TASK_LOG.info("Initializing Mako middleware explicitly")
middleware.MakoMiddleware()
# Actually make the call to define the hook:
worker_process_init.connect(initialize_mako)
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""Stub to make it easier to test without actually running Celery"""
return current_task
def _perform_module_state_update(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed three arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
# get start time for task:
start_time = time()
# find the problem descriptor:
module_descriptor = modulestore().get_instance(course_id, module_state_key)
# find the module in question
modules_to_update = StudentModule.objects.filter(course_id=course_id,
module_state_key=module_state_key)
# give the option of rescoring an individual student. If not specified,
# then rescores all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
# perform the main loop
num_updated = 0
num_attempted = 0
num_total = modules_to_update.count()
def get_task_progress():
"""Return a dict containing info about current task"""
current_time = time()
progress = {'action_name': action_name,
'attempted': num_attempted,
'updated': num_updated,
'total': num_total,
'duration_ms': int((current_time - start_time) * 1000),
}
return progress
task_progress = get_task_progress()
_get_current_task().update_state(state=PROGRESS, meta=task_progress)
for module_to_update in modules_to_update:
num_attempted += 1
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=['action:{name}'.format(name=action_name)]):
if update_fcn(module_descriptor, module_to_update, xmodule_instance_args):
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
num_updated += 1
# update task status:
task_progress = get_task_progress()
_get_current_task().update_state(state=PROGRESS, meta=task_progress)
return task_progress
def update_problem_module_state(entry_id, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This function
updates the entry on success and failure of the _perform_module_state_update function it
wraps. It is setting the entry's value for task_state based on what Celery would set it to once
the task returns to Celery: FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to _perform_module_state_update, and documented there.
If no exceptions are raised, a dict containing the task's result is returned, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Before returning, this is also JSON-serialized and stored in the task_output column of the InstructorTask entry.
If an exception is raised internally, it is caught and recorded in the InstructorTask entry.
This is also a JSON-serialized dict, stored in the task_output column, containing the following keys:
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Once the exception is caught, it is raised again and allowed to pass up to the
task-running level, so that it can also set the failure modes and capture the error trace in the
result object that Celery creates.
"""
# get the InstructorTask to be updated. If this fails, then let the exception return to Celery.
# There's no point in catching it here.
entry = InstructorTask.objects.get(pk=entry_id)
# get inputs to use in this task from the entry:
task_id = entry.task_id
course_id = entry.course_id
task_input = json.loads(entry.task_input)
module_state_key = task_input.get('problem_url')
student_ident = task_input['student'] if 'student' in task_input else None
fmt = 'Starting to update problem modules as task "{task_id}": course "{course_id}" problem "{state_key}": nothing {action} yet'
TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, action=action_name))
# add task_id to xmodule_instance_args, so that it can be output with tracking info:
if xmodule_instance_args is not None:
xmodule_instance_args['task_id'] = task_id
# Now that we have an entry we can try to catch failures:
task_progress = None
try:
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = 'Requested task "{task_id}" did not match actual task "{actual_id}"'
message = fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, actual_id=request_task_id)
TASK_LOG.error(message)
raise UpdateProblemModuleStateError(message)
# Now do the work:
with dog_stats_api.timer('instructor_tasks.module.time.overall', tags=['action:{name}'.format(name=action_name)]):
task_progress = _perform_module_state_update(course_id, module_state_key, student_ident, update_fcn,
action_name, filter_fcn, xmodule_instance_args)
# If we get here, we assume we've succeeded, so update the InstructorTask entry in anticipation.
# But we do this within the try, in case creating the task_output causes an exception to be
# raised.
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
except Exception:
# try to write out the failure to the entry before failing
_, exception, traceback = exc_info()
traceback_string = format_exc(traceback) if traceback is not None else ''
TASK_LOG.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string)
entry.task_output = InstructorTask.create_output_for_failure(exception, traceback_string)
entry.task_state = FAILURE
entry.save_now()
raise
# log and exit, returning task_progress info as task result:
fmt = 'Finishing task "{task_id}": course "{course_id}" problem "{state_key}": final: {progress}'
TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, progress=task_progress))
return task_progress
def _get_task_id_from_xmodule_args(xmodule_instance_args):
"""Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None):
"""
Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
`xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
"""
# reconstitute the problem's corresponding XModule:
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function():
'''
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
'''
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None else ''
return get_module_for_descriptor_internal(student, module_descriptor, model_data_cache, course_id,
make_track_function(), xqueue_callback_url_prefix,
grade_bucket_type=grade_bucket_type)
@transaction.autocommit
def rescore_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
or if the module doesn't support rescoring.
Returns True if problem was successfully rescored for the given student, and False
if problem encountered some kind of error in rescoring.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
module_state_key = student_module.module_state_key
instance = _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args, grade_bucket_type='rescore')
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(loc=module_state_key,
student=student)
TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'):
# This should also not happen, since it should be already checked in the caller,
# but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
"unexpected response {msg}".format(msg=result, course=course_id, loc=module_state_key, student=student))
return False
elif result['success'] not in ['correct', 'incorrect']:
TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
return False
else:
TASK_LOG.debug(u"successfully processed rescore call for course {course}, problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
return True
@transaction.autocommit
def reset_attempts_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
"""
Resets problem attempts to zero for specified `student_module`.
Always returns true, indicating success, if it doesn't raise an exception due to database error.
"""
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
old_number_of_attempts = problem_state["attempts"]
if old_number_of_attempts > 0:
problem_state["attempts"] = 0
# convert back to json and save
student_module.state = json.dumps(problem_state)
student_module.save()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
task_track(request_info, task_info, 'problem_reset_attempts', event_info, page='x_module_task')
# consider the reset to be successful, even if no update was performed. (It's just "optimized".)
return True
@transaction.autocommit
def delete_problem_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
"""
Delete the StudentModule entry.
Always returns true, indicating success, if it doesn't raise an exception due to database error.
"""
student_module.delete()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
task_track(request_info, task_info, 'problem_delete_state', {}, page='x_module_task')
return True
| agpl-3.0 | -2,740,192,363,429,560,300 | 47.981959 | 135 | 0.689555 | false |
lnielsen/zenodo3 | tests/records/test_models.py | 1 | 2781 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo template tests."""
from __future__ import absolute_import, print_function
from datetime import date, timedelta
from zenodo.modules.records.models import AccessRight, ObjectType
def test_access_right(app):
"""Test basic access right features."""
for val in ['open', 'embargoed', 'restricted', 'closed']:
assert getattr(AccessRight, val.upper()) == val
assert AccessRight.is_valid(val)
assert not AccessRight.is_valid('invalid')
assert AccessRight.as_title(AccessRight.OPEN) == 'Open Access'
assert AccessRight.as_category(AccessRight.EMBARGOED) == 'warning'
options = AccessRight.as_options()
assert isinstance(options, tuple)
assert options[0] == ('open', 'Open Access')
def test_access_right_embargo(app):
"""Test access right embargo."""
assert AccessRight.get(AccessRight.OPEN) == 'open'
assert AccessRight.get(AccessRight.EMBARGOED) == 'embargoed'
# Embargo just lifted today.
assert AccessRight.get(
AccessRight.EMBARGOED, embargo_date=date.today()) == 'open'
# Future embargo date.
assert AccessRight.get(
AccessRight.EMBARGOED, embargo_date=date.today()+timedelta(days=1)) \
== 'embargoed'
def test_object_type(app):
"""Test object type."""
types = ['publication', 'poster', 'presentation', 'software', 'dataset',
'image', 'video']
def _assert_obj(obj):
assert '$schema' in obj
assert 'id' in obj
assert 'internal_id' in obj
assert 'title' in obj
assert 'en' in obj['title']
assert 'title_plural' in obj
assert 'en' in obj['title_plural']
assert 'schema.org' in obj
for c in obj.get('children', []):
_assert_obj(c)
for t in types:
_assert_obj(ObjectType.get(t))
| gpl-2.0 | 5,970,375,540,379,292,000 | 33.7625 | 77 | 0.68141 | false |
alheinecke/tensorflow-xsmm | tensorflow/python/estimator/checkpoint_utils_test.py | 19 | 3444 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoint_utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.estimator import checkpoint_utils
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
def _create_checkpoints(sess, checkpoint_dir):
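  """Create var1..var4 in the default graph, save a checkpoint and return their values."""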
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
v1 = variable_scope.get_variable("var1", [1, 10])
v2 = variable_scope.get_variable("var2", [10, 10])
v3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
v4 = variable_scope.get_variable("var4", [9, 9])
sess.run(variables.global_variables_initializer())
v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value, v2_value, v3_value, v4_value
class CheckpointsTest(test.TestCase):
def testNoCheckpoints(self):
checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
if __name__ == "__main__":
test.main()
| apache-2.0 | 3,721,172,568,312,750,600 | 37.266667 | 80 | 0.686992 | false |
Qalthos/ansible | test/sanity/code-smell/no-smart-quotes.py | 29 | 1503 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
def main():
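    """Scan the given paths (or stdin) for smart quotes and undecodable bytes and report them."""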
skip = set([
'docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst',
'test/integration/targets/lookup_properties/lookup-8859-15.ini',
'test/integration/targets/template/files/encoding_1252_utf-8.expected',
'test/integration/targets/template/files/encoding_1252_windows-1252.expected',
'test/integration/targets/template/templates/encoding_1252.j2',
'test/integration/targets/unicode/unicode.yml',
'test/sanity/code-smell/%s' % os.path.basename(__file__),
])
prune = set([
'docs/docsite/_build/',
])
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if path in skip:
continue
if any(path.startswith(p) for p in prune):
continue
with open(path, 'rb') as path_fd:
for line, text in enumerate(path_fd.readlines()):
try:
text = text.decode('utf-8')
except UnicodeDecodeError as ex:
print('%s:%d:%d: UnicodeDecodeError: %s' % (path, line + 1, ex.start + 1, ex))
continue
match = re.search(u'([‘’“”])', text)
if match:
print('%s:%d:%d: use ASCII quotes `\'` and `"` instead of Unicode quotes' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
| gpl-3.0 | 3,142,536,504,179,581,000 | 30.808511 | 98 | 0.545819 | false |
shuxin/androguard | androdd.py | 1 | 7242 | #!/usr/bin/env python
from __future__ import print_function
import os
import re
import shutil
import sys
from argparse import ArgumentParser
from androguard import session
from androguard.misc import clean_file_name
from androguard.core import androconf
from androguard.core.bytecode import method2dot, method2format
from androguard.core.bytecodes import dvm
from androguard.decompiler import decompiler
def valid_class_name(class_name):
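    """Convert a DEX class name such as 'Lcom/example/Foo;' into a relative path like 'com/example/Foo'."""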
if class_name[-1] == ";":
class_name = class_name[1:-1]
return os.path.join(*class_name.split("/"))
def create_directory(pathdir):
if not os.path.exists(pathdir):
os.makedirs(pathdir)
def export_apps_to_format(filename,
s,
output,
methods_filter=None,
jar=None,
decompiler_type=None,
form=None):
print("Dump information %s in %s" % (filename, output))
if not os.path.exists(output):
print("Create directory %s" % output)
os.makedirs(output)
else:
print("Clean directory %s" % output)
androconf.rrmdir(output)
os.makedirs(output)
methods_filter_expr = None
if methods_filter:
methods_filter_expr = re.compile(methods_filter)
dump_classes = []
for _, vm, vmx in s.get_objects_dex():
print("Decompilation ...", end=' ')
sys.stdout.flush()
if decompiler_type == "dex2jad":
vm.set_decompiler(decompiler.DecompilerDex2Jad(vm,
androconf.CONF["BIN_DEX2JAR"],
androconf.CONF["BIN_JAD"],
androconf.CONF["TMP_DIRECTORY"]))
elif decompiler_type == "dex2winejad":
vm.set_decompiler(decompiler.DecompilerDex2WineJad(vm,
androconf.CONF["BIN_DEX2JAR"],
androconf.CONF["BIN_WINEJAD"],
androconf.CONF["TMP_DIRECTORY"]))
elif decompiler_type == "ded":
vm.set_decompiler(decompiler.DecompilerDed(vm,
androconf.CONF["BIN_DED"],
androconf.CONF["TMP_DIRECTORY"]))
elif decompiler_type == "dex2fernflower":
vm.set_decompiler(decompiler.DecompilerDex2Fernflower(vm,
androconf.CONF["BIN_DEX2JAR"],
androconf.CONF["BIN_FERNFLOWER"],
androconf.CONF["OPTIONS_FERNFLOWER"],
androconf.CONF["TMP_DIRECTORY"]))
print("End")
if jar:
print("jar ...", end=' ')
filenamejar = decompiler.Dex2Jar(vm,
androconf.CONF["BIN_DEX2JAR"],
androconf.CONF["TMP_DIRECTORY"]).get_jar()
shutil.move(filenamejar, os.path.join(output, "classes.jar"))
print("End")
for method in vm.get_methods():
if methods_filter_expr:
msig = "%s%s%s" % (method.get_class_name(), method.get_name(),
method.get_descriptor())
if not methods_filter_expr.search(msig):
continue
# Current Folder to write to
filename_class = valid_class_name(method.get_class_name())
filename_class = os.path.join(output, filename_class)
create_directory(filename_class)
print("Dump %s %s %s ..." % (method.get_class_name(),
method.get_name(),
method.get_descriptor()), end=' ')
filename = clean_file_name(os.path.join(filename_class, method.get_short_string()))
buff = method2dot(vmx.get_method(method))
# Write Graph of method
if form:
print("%s ..." % form, end=' ')
method2format(filename + "." + form, form, None, buff)
# Write the Java file for the whole class
if method.get_class_name() not in dump_classes:
print("source codes ...", end=' ')
current_class = vm.get_class(method.get_class_name())
current_filename_class = valid_class_name(current_class.get_name())
current_filename_class = os.path.join(output, current_filename_class + ".java")
with open(current_filename_class, "w") as fd:
fd.write(current_class.get_source())
dump_classes.append(method.get_class_name())
# Write SMALI like code
print("bytecodes ...", end=' ')
bytecode_buff = dvm.get_bytecodes_method(vm, vmx, method)
with open(filename + ".ag", "w") as fd:
fd.write(bytecode_buff)
print()
if __name__ == "__main__":
parser = ArgumentParser(description="Decompile an APK and create Control Flow Graphs")
parser.add_argument("--version", "-v", action="store_true", default=False,
help="Print androguard version and exit")
parser.add_argument("--input", "-i",
help="resources.arsc or APK to parse (legacy option)")
parser.add_argument("file", nargs="?",
help="resources.arsc or APK to parse")
parser.add_argument("--output", "-o", required=True,
help="output directory. If the output folder already exsist, it will"
"be overwritten!")
parser.add_argument("--format", "-f",
help="Additionally write control flow graphs for each method,"
"specify the format for example png, jpg, raw (write dot file), ...")
parser.add_argument("--jar", "-j", action="store_true", default=False,
help="Use DEX2JAR to create a JAR file")
parser.add_argument("--limit", "-l",
help="Limit to certain methods only by regex (default: '.*')")
parser.add_argument("--decompiler", "-d",
help="Use a different decompiler (default: DAD)")
args = parser.parse_args()
if args.file and args.input:
print("Can not give --input and positional argument! Please use only one of them!", file=sys.stderr)
sys.exit(1)
if args.version:
print("Androaxml version %s" % androconf.ANDROGUARD_VERSION)
sys.exit(0)
if not args.input and not args.file:
print("Give one file to decode!", file=sys.stderr)
sys.exit(1)
if args.input:
fname = args.input
else:
fname = args.file
s = session.Session()
with open(fname, "rb") as fd:
s.add(fname, fd.read())
export_apps_to_format(fname, s, args.output, args.limit,
args.jar, args.decompiler, args.format)
| apache-2.0 | 5,372,420,446,172,829,000 | 40.382857 | 108 | 0.519056 | false |
kailIII/emaresa | trunk.pe.bk/sales_category_report/sale_report.py | 3 | 4367 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class sale_report(osv.osv):
_inherit = "sale.report"
_columns = {
'ln_id': fields.many2one('product.category','Category of Product', readonly=True),
'lp_id': fields.many2one('product.category','Category of Product', readonly=True),
'raiz_id': fields.many2one('product.category','Category of Product', readonly=True),
}
def init(self, cr):
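        # Rebuild the sale_report SQL view, extending the standard sale report
        # with three product_category joins so each line also exposes its leaf
        # category (lp_id), its parent (ln_id) and the grandparent category (raiz_id).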
tools.drop_view_if_exists(cr, 'sale_report')
#compania = self.pool.get('res_users').browse(cr, uid,[('id','=',uid)], context=context).company_id
cr.execute("""
create or replace view sale_report as (
select
min(l.id) as id,
l.product_id as product_id,
t.uom_id as product_uom,
sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,
sum(l.product_uom_qty * l.price_unit * (100.0-l.discount) / 100.0) as price_total,
1 as nbr,
s.date_order as date,
s.date_confirm as date_confirm,
to_char(s.date_order, 'YYYY') as year,
to_char(s.date_order, 'MM') as month,
to_char(s.date_order, 'YYYY-MM-DD') as day,
s.partner_id as partner_id,
s.user_id as user_id,
s.shop_id as shop_id,
s.company_id as company_id,
extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,
s.state,
t.categ_id as categ_id,
s.shipped,
s.shipped::integer as shipped_qty_1,
s.pricelist_id as pricelist_id,
s.project_id as analytic_account_id,
ln.id as ln_id,
lp.id as lp_id,
raiz.id as raiz_id
from
sale_order s
join sale_order_line l on (s.id=l.order_id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
inner join product_category lp on categ_id=lp.id
inner join product_category ln on lp.parent_id=ln.id
inner join product_category raiz on ln.parent_id=raiz.id
group by
l.product_id,
l.product_uom_qty,
l.order_id,
t.uom_id,
t.categ_id,
ln.id,
lp.id,
raiz.id,
s.date_order,
s.date_confirm,
s.partner_id,
s.user_id,
s.shop_id,
s.company_id,
s.state,
s.shipped,
s.pricelist_id,
s.project_id
)
""")
sale_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,044,744,322,526,266,400 | 44.020619 | 145 | 0.493932 | false |
p4datasystems/CarnotKE | jyhton/Lib/logging/__init__.py | 17 | 60350 | # Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, cStringIO, traceback, warnings, weakref
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
try:
unicode
_unicode = True
except NameError:
_unicode = False
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
elif __file__.endswith('$py.class'):
_srcfile = __file__[:-9] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
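    """Return the numeric level for either an integer or a registered level name."""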
if isinstance(level, (int, long)):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if thread:
_lock = threading.RLock()
else:
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - long(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and thread:
self.thread = thread.get_ident()
self.threadName = threading.current_thread().name
else:
self.thread = None
self.threadName = None
if not logMultiprocessing:
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except StandardError:
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
if not _unicode: #if no unicode support...
msg = str(self.msg)
else:
msg = self.msg
if not isinstance(msg, basestring):
try:
msg = str(self.msg)
except UnicodeError:
msg = self.msg #Defer encoding till later
if self.args:
msg = msg % self.args
return msg
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = LogRecord(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
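# Illustrative sketch of the two behaviours documented above; the logger name
# and message are made-up values. A single dict passed as the only argument
# enables '%(key)s'-style formatting, and makeLogRecord() rebuilds a record
# from its attribute dictionary (e.g. one received over a socket).
def _example_logrecord_usage():
    rec = LogRecord('example.app', DEBUG, __file__, 1,
                    'a %(a)d b %(b)s', ({'a': 1, 'b': 2},), None)
    assert rec.getMessage() == 'a 1 b 2'
    clone = makeLogRecord(rec.__dict__)
    assert clone.getMessage() == rec.getMessage()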
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
"""
if fmt:
self._fmt = fmt
else:
self._fmt = "%(message)s"
self.datefmt = datefmt
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
s = "%s,%03d" % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = cStringIO.StringIO()
traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._fmt.find("%(asctime)") >= 0
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self._fmt % record.__dict__
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(),
'replace')
return s
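# Illustrative sketch of the Formatter knobs described above; the format string,
# date format and record contents are arbitrary examples.
def _example_formatter():
    fmt = Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s',
                    datefmt='%Y-%m-%dT%H:%M:%S')
    fmt.converter = time.gmtime   # per-instance override; the class-wide default is time.localtime
    record = LogRecord('example.app', WARNING, __file__, 42, 'disk usage at %d%%', (93,), None)
    return fmt.format(record)     # e.g. '2015-06-01T12:00:00 WARNING example.app: disk usage at 93%'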
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return 1
elif self.name == record.name:
return 1
elif record.name.find(self.name, 0, self.nlen) != 0:
return 0
return (record.name[self.nlen] == ".")
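# Illustrative sketch of the hierarchy rule spelled out in the Filter docstring;
# the logger names are arbitrary.
def _example_filter():
    f = Filter('A.B')
    allowed = LogRecord('A.B.C', INFO, __file__, 1, 'ok', (), None)
    blocked = LogRecord('A.BB', INFO, __file__, 1, 'no', (), None)
    assert f.filter(allowed)        # 'A.B.C' sits below 'A.B' in the hierarchy
    assert not f.filter(blocked)    # 'A.BB' merely shares a prefix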
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
"""
rv = 1
for f in self.filters:
if not f.filter(record):
rv = 0
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if thread:
self.lock = threading.RLock()
else:
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError:
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s\n"
if not _unicode: #if no unicode support...
stream.write(fs % msg)
else:
try:
if (isinstance(msg, unicode) and
getattr(stream, 'encoding', None)):
ufs = fs.decode(stream.encoding)
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
#Printing to terminals sometimes fails. For example,
#with an encoding of 'cp1251', the above write will
#work if written to a stream opened or wrapped by
#the codecs module, but fail when writing to a
#terminal even when the codepage is set to cp1251.
#An extra encoding step seems to be needed.
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
if codecs is None:
encoding = None
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
if self.encoding is None:
stream = open(self.baseFilename, self.mode)
else:
stream = codecs.open(self.baseFilename, self.mode, self.encoding)
return stream
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
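# Illustrative sketch wiring the handler classes above together; the logger
# name, log file path and levels are assumptions for demonstration only.
def _example_handler_setup():
    console = StreamHandler()                               # defaults to sys.stderr
    console.setLevel(INFO)
    console.setFormatter(Formatter(BASIC_FORMAT))
    logfile = FileHandler('/tmp/example-app.log', delay=1)  # opened lazily on first emit
    logger = getLogger('example.handlers')
    logger.addHandler(console)
    logger.addHandler(logfile)
    return logger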
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
#self.loggers = [alogger]
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
#if alogger not in self.loggers:
if alogger not in self.loggerMap:
#self.loggers.append(alogger)
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = 0
self.loggerDict = {}
self.loggerClass = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, basestring):
            raise TypeError('A logger name must be a string or Unicode')
if isinstance(name, unicode):
name = name.encode('utf-8')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(nm)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
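# Illustrative sketch of the placeholder fix-up performed by Manager.getLogger();
# the dotted names are arbitrary.
def _example_placeholder_fixup():
    leaf = getLogger('example.pkg.sub.leaf')   # 'example.pkg.sub' and ancestors become PlaceHolders
    assert leaf.parent is root                 # no concrete ancestor exists yet
    mid = getLogger('example.pkg.sub')         # replaces the PlaceHolder ...
    assert leaf.parent is mid                  # ... and re-parents the existing child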
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
def setLevel(self, level):
"""
Set the logging level of this logger.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
warn = warning
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func = self.findCaller()
except ValueError:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = 1
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return 0
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
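# Illustrative sketch of the getChild() equivalence described in its docstring;
# the logger names are arbitrary.
def _example_get_child():
    parent = getLogger('example.abc')
    assert parent.getChild('def.ghi') is getLogger('example.abc.def.ghi')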
class RootLogger(Logger):
"""
    A root logger is not that different from any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.error(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
kwargs["exc_info"] = 1
self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.critical(msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def isEnabledFor(self, level):
"""
See if the underlying logger is enabled for the specified level.
"""
return self.logger.isEnabledFor(level)
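# Illustrative sketch of LoggerAdapter; the "extra" key and value are made-up
# contextual data.
def _example_logger_adapter():
    adapter = LoggerAdapter(getLogger('example.adapter'), {'request_id': 'abc-123'})
    adapter.info('request started')   # handlers see record.request_id via the injected extra dict
    return adapter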
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
hdlr = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
hdlr = StreamHandler(stream)
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
fmt = Formatter(fs, dfs)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
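# Illustrative sketch of one-shot configuration via basicConfig(); the format
# strings and level are arbitrary choices.
def _example_basic_config():
    basicConfig(level=DEBUG,
                format='%(asctime)s %(levelname)-8s %(name)s %(message)s',
                datefmt='%H:%M:%S')
    debug('root logger configured')   # basicConfig() itself is a no-op if root already has handlers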
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger,
with exception information.
"""
kwargs['exc_info'] = 1
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
warn = warning
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning() which redirects to logging. It will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
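# Illustrative sketch of routing warnings through logging; the warning text is
# an arbitrary example.
def _example_capture_warnings():
    captureWarnings(True)                  # warnings now land in the 'py.warnings' logger
    try:
        warnings.warn('deprecated feature used')
    finally:
        captureWarnings(False)             # restore the original warnings.showwarning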
| apache-2.0 | 5,706,937,941,857,209,000 | 33.924769 | 93 | 0.581707 | false |
cyberden/CouchPotatoServer | couchpotato/core/notifications/slack.py | 39 | 4503 | import json
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Slack'
class Slack(Notification):
url = 'https://slack.com/api/chat.postMessage'
required_confs = ('token', 'channels',)
def notify(self, message='', data=None, listener=None):
for key in self.required_confs:
if not self.conf(key):
log.warning('Slack notifications are enabled, but '
'"{0}" is not specified.'.format(key))
return False
data = data or {}
message = message.strip()
if self.conf('include_imdb') and 'identifier' in data:
template = ' http://www.imdb.com/title/{0[identifier]}/'
message += template.format(data)
payload = {
'token': self.conf('token'),
'text': message,
'username': self.conf('bot_name'),
'unfurl_links': self.conf('include_imdb'),
'as_user': self.conf('as_user'),
'icon_url': self.conf('icon_url'),
'icon_emoji': self.conf('icon_emoji')
}
channels = self.conf('channels').split(',')
for channel in channels:
payload['channel'] = channel.strip()
response = self.urlopen(self.url, data=payload)
response = json.loads(response)
if not response['ok']:
log.warning('Notification sending to Slack has failed. Error '
'code: %s.', response['error'])
return False
return True
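# Illustrative sketch (not part of the original plugin) of how notify() fans one
# message out over a comma-separated channel list; the channel names, token and
# message text below are made up.
def _example_channel_fanout(channels='#downloads, @couch-admin'):
    base = {'token': 'dummy-token', 'text': 'Downloaded Movie (2015)', 'username': 'CouchPotato'}
    return [dict(base, channel=channel.strip()) for channel in channels.split(',')]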
config = [{
'name': 'slack',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'slack',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'token',
'description': (
'Your Slack authentication token.',
'Can be created at https://api.slack.com/web'
)
},
{
'name': 'channels',
'description': (
'Channel to send notifications to.',
'Can be a public channel, private group or IM '
'channel. Can be an encoded ID or a name '
'(staring with a hashtag, e.g. #general). '
'Separate with commas in order to notify multiple '
'channels. It is however recommended to send '
'notifications to only one channel due to '
'the Slack API rate limits.'
)
},
{
'name': 'include_imdb',
'default': True,
'type': 'bool',
                    'description': 'Include a link to the movie page on IMDB.'
},
{
'name': 'bot_name',
'description': 'Name of bot.',
'default': 'CouchPotato',
'advanced': True,
},
{
'name': 'as_user',
'description': 'Send message as the authentication token '
' user.',
'default': False,
'type': 'bool',
'advanced': True
},
{
'name': 'icon_url',
'description': 'URL to an image to use as the icon for '
'notifications.',
'advanced': True,
},
{
'name': 'icon_emoji',
'description': (
'Emoji to use as the icon for notifications.',
'Overrides icon_url'
),
'advanced': True,
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
| gpl-3.0 | -7,001,516,713,638,402,000 | 34.738095 | 79 | 0.40795 | false |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.0.253/external/webkit/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py | 19 | 2145 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff
class CloseBugForLandDiffTest(unittest.TestCase):
def test_empty_state(self):
capture = OutputCapture()
step = CloseBugForLandDiff(MockTool(), MockOptions())
expected_stderr = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n"
capture.assert_outputs(self, step.run, [{"commit_text" : "Mock commit text"}], expected_stderr=expected_stderr)
| gpl-2.0 | 2,118,910,875,156,846,800 | 52.625 | 119 | 0.777622 | false |
khkaminska/djangoproject.com | contact/tests.py | 9 | 2344 | from unittest import skipIf
import requests
from django.core import mail
from django.test import TestCase
from django.test.utils import override_settings
def check_network_connection():
try:
requests.get('https://djangoproject.com')
except requests.exceptions.ConnectionError:
return False
return True
has_network_connection = check_network_connection()
@override_settings(AKISMET_TESTING=True)
class ContactFormTests(TestCase):
def setUp(self):
self.url = '/contact/foundation/'
@override_settings(AKISMET_API_KEY='') # Disable Akismet in tests
def test_without_akismet(self):
response = self.client.post(self.url, {
'name': 'A. Random Hacker',
'email': '[email protected]',
'message_subject': 'Hello',
'body': 'Hello, World!'
})
self.assertRedirects(response, '/contact/sent/')
self.assertEqual(mail.outbox[-1].subject, '[Contact form] Hello')
@skipIf(not has_network_connection, 'Requires a network connection')
def test_empty_name(self):
response = self.client.post(self.url, {
'name': '',
'email': '[email protected]',
'message_subject': 'Hello',
'body': 'Hello, World!',
})
self.assertFormError(response, 'form', 'name', ['This field is required.'])
@skipIf(not has_network_connection, 'Requires a network connection')
def test_akismet_detect_spam(self):
response = self.client.post(self.url, {
'name': 'viagra-test-123', # according to akismet this should flag as spam
'email': '[email protected]',
'message_subject': 'Hello',
'body': 'Hello, World!'
})
self.assertContains(response, 'Akismet thinks this message is spam')
self.assertEqual(len(mail.outbox), 0)
@skipIf(not has_network_connection, 'Requires a network connection')
def test_akismet_not_spam(self):
response = self.client.post(self.url, {
'name': 'administrator',
'email': '[email protected]',
'message_subject': 'Hello',
'body': 'Hello, World!'
})
self.assertRedirects(response, '/contact/sent/')
self.assertEqual(mail.outbox[-1].subject, '[Contact form] Hello')
| bsd-3-clause | -8,343,157,735,432,976,000 | 34.515152 | 87 | 0.617321 | false |
fluxw42/youtube-dl | youtube_dl/extractor/iqiyi.py | 9 | 13109 | # coding: utf-8
from __future__ import unicode_literals
import hashlib
import itertools
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
clean_html,
decode_packed_codes,
get_element_by_id,
get_element_by_attribute,
ExtractorError,
ohdave_rsa_encrypt,
remove_start,
)
def md5_text(text):
return hashlib.md5(text.encode('utf-8')).hexdigest()
class IqiyiSDK(object):
def __init__(self, target, ip, timestamp):
self.target = target
self.ip = ip
self.timestamp = timestamp
@staticmethod
def split_sum(data):
return compat_str(sum(map(lambda p: int(p, 16), list(data))))
@staticmethod
def digit_sum(num):
if isinstance(num, int):
num = compat_str(num)
return compat_str(sum(map(int, num)))
def even_odd(self):
even = self.digit_sum(compat_str(self.timestamp)[::2])
odd = self.digit_sum(compat_str(self.timestamp)[1::2])
return even, odd
def preprocess(self, chunksize):
self.target = md5_text(self.target)
chunks = []
for i in range(32 // chunksize):
chunks.append(self.target[chunksize * i:chunksize * (i + 1)])
if 32 % chunksize:
chunks.append(self.target[32 - 32 % chunksize:])
return chunks, list(map(int, self.ip.split('.')))
def mod(self, modulus):
chunks, ip = self.preprocess(32)
self.target = chunks[0] + ''.join(map(lambda p: compat_str(p % modulus), ip))
def split(self, chunksize):
modulus_map = {
4: 256,
5: 10,
8: 100,
}
chunks, ip = self.preprocess(chunksize)
ret = ''
for i in range(len(chunks)):
ip_part = compat_str(ip[i] % modulus_map[chunksize]) if i < 4 else ''
if chunksize == 8:
ret += ip_part + chunks[i]
else:
ret += chunks[i] + ip_part
self.target = ret
def handle_input16(self):
self.target = md5_text(self.target)
self.target = self.split_sum(self.target[:16]) + self.target + self.split_sum(self.target[16:])
def handle_input8(self):
self.target = md5_text(self.target)
ret = ''
for i in range(4):
part = self.target[8 * i:8 * (i + 1)]
ret += self.split_sum(part) + part
self.target = ret
def handleSum(self):
self.target = md5_text(self.target)
self.target = self.split_sum(self.target) + self.target
def date(self, scheme):
self.target = md5_text(self.target)
d = time.localtime(self.timestamp)
strings = {
'y': compat_str(d.tm_year),
'm': '%02d' % d.tm_mon,
'd': '%02d' % d.tm_mday,
}
self.target += ''.join(map(lambda c: strings[c], list(scheme)))
def split_time_even_odd(self):
even, odd = self.even_odd()
self.target = odd + md5_text(self.target) + even
def split_time_odd_even(self):
even, odd = self.even_odd()
self.target = even + md5_text(self.target) + odd
def split_ip_time_sum(self):
chunks, ip = self.preprocess(32)
self.target = compat_str(sum(ip)) + chunks[0] + self.digit_sum(self.timestamp)
def split_time_ip_sum(self):
chunks, ip = self.preprocess(32)
self.target = self.digit_sum(self.timestamp) + chunks[0] + compat_str(sum(ip))
class IqiyiSDKInterpreter(object):
def __init__(self, sdk_code):
self.sdk_code = sdk_code
def run(self, target, ip, timestamp):
self.sdk_code = decode_packed_codes(self.sdk_code)
functions = re.findall(r'input=([a-zA-Z0-9]+)\(input', self.sdk_code)
sdk = IqiyiSDK(target, ip, timestamp)
other_functions = {
'handleSum': sdk.handleSum,
'handleInput8': sdk.handle_input8,
'handleInput16': sdk.handle_input16,
'splitTimeEvenOdd': sdk.split_time_even_odd,
'splitTimeOddEven': sdk.split_time_odd_even,
'splitIpTimeSum': sdk.split_ip_time_sum,
'splitTimeIpSum': sdk.split_time_ip_sum,
}
for function in functions:
if re.match(r'mod\d+', function):
sdk.mod(int(function[3:]))
elif re.match(r'date[ymd]{3}', function):
sdk.date(function[4:])
elif re.match(r'split\d+', function):
sdk.split(int(function[5:]))
elif function in other_functions:
other_functions[function]()
else:
                raise ExtractorError('Unknown function %s' % function)
return sdk.target
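# Illustrative sketch of the string-mangling helpers used by the signing SDK
# above; the target string, IP address and timestamp are made-up values.
def _example_sdk_helpers():
    sdk = IqiyiSDK('some-target-string', '8.8.8.8', 1450000000)
    assert IqiyiSDK.digit_sum(1450000000) == '10'   # 1 + 4 + 5
    assert sdk.even_odd() == ('6', '4')             # digit sums of even/odd positions of the timestamp
    return sdk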
class IqiyiIE(InfoExtractor):
IE_NAME = 'iqiyi'
IE_DESC = '爱奇艺'
_VALID_URL = r'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html'
_NETRC_MACHINE = 'iqiyi'
_TESTS = [{
'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
# MD5 checksum differs on my machine and Travis CI
'info_dict': {
'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
'ext': 'mp4',
'title': '美国德州空中惊现奇异云团 酷似UFO',
}
}, {
'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
'md5': 'b7dc800a4004b1b57749d9abae0472da',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb',
'ext': 'mp4',
# This can be either Simplified Chinese or Traditional Chinese
'title': r're:^(?:名侦探柯南 国语版:第752集 迫近灰原秘密的黑影 下篇|名偵探柯南 國語版:第752集 迫近灰原秘密的黑影 下篇)$',
},
'skip': 'Geo-restricted to China',
}, {
'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
'only_matching': True,
}, {
'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
'only_matching': True,
}, {
'url': 'http://yule.iqiyi.com/pcb.html',
'only_matching': True,
}, {
# VIP-only video. The first 2 parts (6 minutes) are available without login
# MD5 sums omitted as values are different on Travis CI and my machine
'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
'info_dict': {
'id': 'f3cf468b39dddb30d676f89a91200dc1',
'ext': 'mp4',
'title': '泰坦尼克号',
},
'skip': 'Geo-restricted to China',
}, {
'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
'info_dict': {
'id': '202918101',
'title': '灌篮高手 国语版',
},
'playlist_count': 101,
}, {
'url': 'http://www.pps.tv/w_19rrbav0ph.html',
'only_matching': True,
}]
_FORMATS_MAP = {
'96': 1, # 216p, 240p
'1': 2, # 336p, 360p
'2': 3, # 480p, 504p
'21': 4, # 504p
'4': 5, # 720p
'17': 5, # 720p
'5': 6, # 1072p, 1080p
'18': 7, # 1080p
}
def _real_initialize(self):
self._login()
@staticmethod
def _rsa_fun(data):
# public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
e = 65537
return ohdave_rsa_encrypt(data, e, N)
def _login(self):
(username, password) = self._get_login_info()
# No authentication to be performed
if not username:
return True
data = self._download_json(
'http://kylin.iqiyi.com/get_token', None,
note='Get token for logging', errnote='Unable to get token for logging')
sdk = data['sdk']
timestamp = int(time.time())
target = '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
username, self._rsa_fun(password.encode('utf-8')))
interp = IqiyiSDKInterpreter(sdk)
sign = interp.run(target, data['ip'], timestamp)
validation_params = {
'target': target,
'server': 'BEA3AA1908656AABCCFF76582C4C6660',
'token': data['token'],
'bird_src': 'f8d91d57af224da7893dd397d52d811a',
'sign': sign,
'bird_t': timestamp,
}
validation_result = self._download_json(
'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None,
note='Validate credentials', errnote='Unable to validate credentials')
MSG_MAP = {
'P00107': 'please login via the web interface and enter the CAPTCHA code',
'P00117': 'bad username or password',
}
code = validation_result['code']
if code != 'A00000':
msg = MSG_MAP.get(code)
if not msg:
msg = 'error %s' % code
if validation_result.get('msg'):
msg += ': ' + validation_result['msg']
self._downloader.report_warning('unable to log in: ' + msg)
return False
return True
def get_raw_data(self, tvid, video_id):
tm = int(time.time() * 1000)
key = 'd5fb4bd9d50c4be6948c97edd7254b0e'
sc = md5_text(compat_str(tm) + key + tvid)
params = {
'tvid': tvid,
'vid': video_id,
'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
'sc': sc,
't': tm,
}
return self._download_json(
'http://cache.m.iqiyi.com/jp/tmts/%s/%s/' % (tvid, video_id),
video_id, transform_source=lambda s: remove_start(s, 'var tvInfoJs='),
query=params, headers=self.geo_verification_headers())
def _extract_playlist(self, webpage):
PAGE_SIZE = 50
links = re.findall(
r'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
webpage)
if not links:
return
album_id = self._search_regex(
r'albumId\s*:\s*(\d+),', webpage, 'album ID')
album_title = self._search_regex(
r'data-share-title="([^"]+)"', webpage, 'album title', fatal=False)
entries = list(map(self.url_result, links))
# Start from 2 because links in the first page are already on webpage
for page_num in itertools.count(2):
pagelist_page = self._download_webpage(
'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id, page_num, PAGE_SIZE),
album_id,
note='Download playlist page %d' % page_num,
errnote='Failed to download playlist page %d' % page_num)
pagelist = self._parse_json(
remove_start(pagelist_page, 'var tvInfoJs='), album_id)
vlist = pagelist['data']['vlist']
for item in vlist:
entries.append(self.url_result(item['vurl']))
if len(vlist) < PAGE_SIZE:
break
return self.playlist_result(entries, album_id, album_title)
def _real_extract(self, url):
webpage = self._download_webpage(
url, 'temp_id', note='download video page')
        # There's no simple way to determine whether a URL is a playlist or not
# So detect it
playlist_result = self._extract_playlist(webpage)
if playlist_result:
return playlist_result
tvid = self._search_regex(
r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
video_id = self._search_regex(
r'data-player-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
formats = []
for _ in range(5):
raw_data = self.get_raw_data(tvid, video_id)
if raw_data['code'] != 'A00000':
if raw_data['code'] == 'A00111':
self.raise_geo_restricted()
raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
data = raw_data['data']
for stream in data['vidl']:
if 'm3utx' not in stream:
continue
vd = compat_str(stream['vd'])
formats.append({
'url': stream['m3utx'],
'format_id': vd,
'ext': 'mp4',
'preference': self._FORMATS_MAP.get(vd, -1),
'protocol': 'm3u8_native',
})
if formats:
break
self._sleep(5, video_id)
self._sort_formats(formats)
title = (get_element_by_id('widget-videotitle', webpage) or
clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage)))
return {
'id': video_id,
'title': title,
'formats': formats,
}
| unlicense | -665,217,653,709,193,300 | 32.582902 | 159 | 0.53915 | false |
stormbeard/pyvmomi-community-samples | samples/tests/interactive_wrapper_tests.py | 13 | 3510 | from unittest import TestCase
from mock import Mock
from samples.tools.interactive_wrapper import (
VM,
ESX,
get_all_vms_in_folder
)
class VMTests(TestCase):
def setUp(self):
self.raw_vm = Mock()
self.wrapped_vm = VM(self.raw_vm)
def test_should_passthrough_unwrapped_attributes(self):
self.assertEqual(self.wrapped_vm.anything, self.raw_vm.anything)
def test_should_return_interface_when_one_matches(self):
foo_mock = lambda: None
foo_mock.name = "foo"
bar_mock = lambda: None
bar_mock.name = "bar"
self.raw_vm.network = [foo_mock, bar_mock]
bar = lambda n: n.name == "bar"
actual = self.wrapped_vm.get_first_network_interface_matching(bar)
self.assertEqual(actual, bar_mock)
def test_should_return_first_interface_when_several_match(self):
aha_mock = lambda: None
aha_mock.name = "aha"
foo_mock_1 = lambda: None
foo_mock_1.name = "foo"
bar_mock = lambda: None
bar_mock.name = "bar"
foo_mock_2 = lambda: None
foo_mock_2.name = "foo"
self.raw_vm.network = [aha_mock, foo_mock_1, bar_mock, foo_mock_2]
foo = lambda n: n.name == "foo"
actual = self.wrapped_vm.get_first_network_interface_matching(foo)
self.assertEqual(actual, foo_mock_1)
class ESXTests(TestCase):
def setUp(self):
self.raw_esx = Mock()
self.raw_esx.name = "esx-name"
self.wrapped_esx = ESX(self.raw_esx)
def test_should_passthrough_unwrapped_attributes(self):
self.assertEqual(self.wrapped_esx.anything, self.raw_esx.anything)
def test_should_equal_to_esx_with_same_name(self):
other_raw_esx = Mock()
other_raw_esx.name = "esx-name"
other_esx = ESX(other_raw_esx)
self.assertTrue(self.wrapped_esx == other_esx)
def test_should_not_equal_to_esx_with_other_name(self):
other_raw_esx = Mock()
other_raw_esx.name = "other-esx-name"
other_esx = ESX(other_raw_esx)
self.assertFalse(self.wrapped_esx == other_esx)
def test_should_raise_when_number_of_cores_not_in_resources(self):
resources = []
self.raw_esx.licensableResource.resource = resources
self.assertRaises(RuntimeError, self.wrapped_esx.get_number_of_cores)
def test_should_return_number_of_cores_when_in_resources(self):
resource_1 = Mock()
resource_1.key = "weLoveCamelCase"
resource_2 = Mock()
resource_2.key = "numCpuCores"
resource_2.value = 42
resource_3 = Mock()
resource_3.key = "someOtherKey"
resources = [resource_1, resource_2, resource_3]
self.raw_esx.licensableResource.resource = resources
self.assertEquals(self.wrapped_esx.get_number_of_cores(), 42)
class getAllVMInFolderTests(TestCase):
def test_should_resolve_deep_nesting(self):
vm_1 = lambda: None
vm_1.name = "vm-1"
vm_2 = lambda: None
vm_2.name = "vm-2"
level_2_nesting = [vm_2]
child_folder = Mock()
child_folder.childEntity = level_2_nesting
level_1_nesting = [vm_1, child_folder]
root_folder = Mock()
root_folder.childEntity = level_1_nesting
actual_vms = [vm for vm in get_all_vms_in_folder(root_folder)]
self.assertEqual(len(actual_vms), 2)
self.assertEqual(actual_vms[0].raw_vm, vm_1)
self.assertEqual(actual_vms[1].raw_vm, vm_2)
| apache-2.0 | -2,916,098,320,874,111,500 | 30.339286 | 77 | 0.622507 | false |
leiferikb/bitpop | src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py | 2 | 3949 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.controllers.test_result_writer import write_test_result
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.layout_tests.models import test_failures
class TestResultWriterTests(unittest.TestCase):
def run_test(self, failures=None, files=None):
failures = failures or []
host = MockSystemHost()
host.filesystem.files = files or {}
port = TestPort(host=host, port_name='test-mac-snowleopard', options=optparse.Values())
actual_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
expected_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
write_test_result(host.filesystem, port, '/tmp', 'foo.html', actual_output, expected_output, failures)
return host.filesystem.written_files
def test_success(self):
# Nothing is written when the test passes.
written_files = self.run_test(failures=[])
self.assertEqual(written_files, {})
def test_reference_exists(self):
failure = test_failures.FailureReftestMismatch()
failure.reference_filename = '/src/exists-expected.html'
files = {'/src/exists-expected.html': 'yup'}
written_files = self.run_test(failures=[failure], files=files)
self.assertEqual(written_files, {'/tmp/exists-expected.html': 'yup'})
failure = test_failures.FailureReftestMismatchDidNotOccur()
failure.reference_filename = '/src/exists-expected-mismatch.html'
files = {'/src/exists-expected-mismatch.html': 'yup'}
written_files = self.run_test(failures=[failure], files=files)
self.assertEqual(written_files, {'/tmp/exists-expected-mismatch.html': 'yup'})
def test_reference_is_missing(self):
failure = test_failures.FailureReftestMismatch()
failure.reference_filename = 'notfound.html'
written_files = self.run_test(failures=[failure], files={})
self.assertEqual(written_files, {})
failure = test_failures.FailureReftestMismatchDidNotOccur()
failure.reference_filename = 'notfound.html'
written_files = self.run_test(failures=[failure], files={})
self.assertEqual(written_files, {})
| gpl-3.0 | 4,685,975,626,771,379,000 | 50.285714 | 110 | 0.732591 | false |
rdipietro/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py | 8 | 23247 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Block GRU module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import gru_ops
from tensorflow.python.ops import variable_scope as vs
class GRUBlockCellTest(tf.test.TestCase):
_use_gpu = False
def testNoneDimsWithDynamicRNN(self):
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
batch_size = 4
cell_size = 5
input_size = 6
num_steps = 7
cell = gru_ops.GRUBlockCell(cell_size)
x = tf.placeholder(tf.float32, shape=(None, None, input_size))
_, output = tf.nn.dynamic_rnn(cell, x, time_major=True, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_size)
sess.run(output, feed)
def testBlockGRUToGRUCellSingleStep(self):
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
batch_size = 4
cell_size = 5
input_size = 6
seed = 1994
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)
# Inputs
x = tf.zeros([batch_size, input_size])
h = tf.zeros([batch_size, cell_size])
# Values for the inputs.
x_value = np.random.rand(batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Output from the basic GRU cell implementation.
with tf.variable_scope("basic", initializer=initializer):
output = tf.nn.rnn_cell.GRUCell(cell_size)(x, h)
sess.run([tf.global_variables_initializer()])
basic_res = sess.run([output], {x: x_value, h: h_value})
# Output from the block GRU cell implementation.
with tf.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([tf.global_variables_initializer()])
block_res = sess.run([output], {x: x_value, h: h_value})
self.assertEqual(len(block_res), len(basic_res))
for block, basic in zip(block_res, basic_res):
self.assertAllClose(block, basic)
def testBlockGRUToGRUCellMultiStep(self):
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 3
time_steps = 4
# Random initializers.
seed = 1994
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = tf.placeholder(
tf.float32, shape=(time_steps, batch_size, input_size))
h = tf.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Output from the block GRU cell implementation.
with tf.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([tf.global_variables_initializer()])
block_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Output from the basic GRU cell implementation.
with tf.variable_scope("basic", initializer=initializer):
cell = tf.nn.rnn_cell.GRUCell(cell_size)
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([tf.global_variables_initializer()])
basic_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Check the lengths of the outputs_dynamic, and states.
self.assertEqual(len(block_res), len(basic_res))
self.assertEqual(len(block_res[0]), len(basic_res[0]))
self.assertEqual(len(block_res[1]), len(basic_res[1]))
# Check the outputs_dynamic values.
for block_output, basic_output in zip(block_res[0], basic_res[0]):
self.assertAllClose(block_output, basic_output)
# Check the state_dynamic value.
self.assertAllClose(block_res[1], block_res[1])
def testDerivativeOfBlockGRUToGRUCellSingleStep(self):
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 4
seed = 1994
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
x = tf.zeros([batch_size, input_size])
h = tf.zeros([batch_size, cell_size])
# Values for the inputs.
x_value = np.random.rand(batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Gradients from the block GRU cell implementation.
with tf.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([tf.global_variables_initializer()])
all_variables = tf.all_variables()[0:4]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = tf.gradients([output], x)
d_new_h_wrt_h = tf.gradients([output], h)
d_new_h_wrt_w_ru = tf.gradients([output], w_ru)
d_new_h_wrt_w_c = tf.gradients([output], w_c)
d_new_h_wrt_b_ru = tf.gradients([output], b_ru)
d_new_h_wrt_b_c = tf.gradients([output], b_c)
d_block_res = sess.run([d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru,
d_new_h_wrt_w_c, d_new_h_wrt_b_ru,
d_new_h_wrt_b_c], {x: x_value,
h: h_value})
# Gradients from the basic GRU cell implementation.
with tf.variable_scope("basic", initializer=initializer):
output = tf.nn.rnn_cell.GRUCell(cell_size)(x, h)
sess.run([tf.global_variables_initializer()])
all_variables = tf.all_variables()[4:8]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = tf.gradients([output], x)
d_new_h_wrt_h = tf.gradients([output], h)
d_new_h_wrt_w_ru = tf.gradients([output], w_ru)
d_new_h_wrt_w_c = tf.gradients([output], w_c)
d_new_h_wrt_b_ru = tf.gradients([output], b_ru)
d_new_h_wrt_b_c = tf.gradients([output], b_c)
d_basic_res = sess.run([d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru,
d_new_h_wrt_w_c, d_new_h_wrt_b_ru,
d_new_h_wrt_b_c], {x: x_value,
h: h_value})
# Check lengths of derivative results.
self.assertEqual(len(d_block_res), len(d_basic_res))
# Check the value of every derivative result.
for block, basic in zip(d_block_res, d_basic_res):
self.assertAllClose(block, basic)
def testDerivativeOfBlockGRUToGRUCellMultiSteps(self):
batch_size = 2
cell_size = 3
input_size = 4
time_steps = 2
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
# Random initializers.
seed = 1994
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = tf.placeholder(
tf.float32, shape=(time_steps, batch_size, input_size))
h = tf.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
feeds = {concat_x: x_values, h: h_value}
# Gradients from the block GRU cell implementation.
with tf.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
grad_output_wrt_x = tf.gradients([outputs_dynamic[0]], concat_x)
grad_output_wrt_h = tf.gradients([outputs_dynamic[0]], h)
sess.run([tf.global_variables_initializer()])
block_grad_res_x, block_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
# Gradients from the basic GRU cell implementation.
with tf.variable_scope("basic", initializer=initializer):
cell = tf.nn.rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
grad_output_wrt_x = tf.gradients([outputs_dynamic[0]], concat_x)
grad_output_wrt_h = tf.gradients([outputs_dynamic[0]], h)
sess.run([tf.global_variables_initializer()])
basic_grad_res_x, basic_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
# Check derivatives values of the outputs wrt to x.
self.assertEqual(len(block_grad_res_x), len(basic_grad_res_x))
# Check derivatives values of the outputs wrt to h.
for block, basic in zip(block_grad_res_x, basic_grad_res_x):
self.assertAllClose(block, basic)
# Check derivatives values of the outputs wrt to x.
self.assertEqual(len(block_grad_res_h), len(basic_grad_res_h))
# Check derivatives values of the outputs wrt to h.
for block, basic in zip(block_grad_res_h, basic_grad_res_h):
self.assertAllClose(block, basic)
def testGradient(self):
with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
batch_size = 1
cell_size = 3
input_size = 2
# Inputs
x = tf.zeros([batch_size, input_size])
h = tf.zeros([batch_size, cell_size])
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([tf.global_variables_initializer()])
all_variables = tf.all_variables()
[w_ru, b_ru, w_c, b_c] = all_variables[:4]
error_x = tf.test.compute_gradient_error(x, (batch_size, input_size),
output[0],
(batch_size, cell_size))
error_h = tf.test.compute_gradient_error(h, (batch_size, cell_size),
output[0],
(batch_size, cell_size))
error_w_ru = tf.test.compute_gradient_error(w_ru, (input_size + cell_size,
2 * cell_size),
output[0],
(batch_size, cell_size))
error_w_c = tf.test.compute_gradient_error(w_c, (input_size + cell_size,
cell_size), output[0],
(batch_size, cell_size))
error_b_ru = tf.test.compute_gradient_error(b_ru, (2 * cell_size,),
output[0],
(batch_size, cell_size))
error_b_c = tf.test.compute_gradient_error(b_c, (cell_size,), output[0],
(batch_size, cell_size))
eps = 1e-4
self.assertLess(error_x, eps)
self.assertLess(error_h, eps)
self.assertLess(error_w_ru, eps)
self.assertLess(error_w_c, eps)
self.assertLess(error_b_ru, eps)
self.assertLess(error_b_c, eps)
class GRUBlockCellGpuTest(GRUBlockCellTest):
_use_gpu = True
#### Benchmarking GRUBlockCell vs GRUCell.
def time_taken_by_op(op, sess, num_runs=50):
"""Time taken by the Op."""
for _ in range(2):
sess.run([op])
start_time = time.time()
for _ in range(num_runs):
sess.run([op])
end_time = time.time()
time_taken = end_time - start_time
return time_taken
def training_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
time_steps,
use_gpu=False,
iters=30):
"""Benchmark training speed between GRUBlockCell vs GRUCell."""
tf.reset_default_graph()
with tf.Session(graph=tf.Graph()) as sess:
# Specify the device which is been used.
with tf.device("/cpu:0" if not use_gpu else "/gpu:0"):
# Random initializers.
seed = 1994
initializer = tf.random_uniform_initializer(-1, 1, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = vs.get_variable("concat_x",
[time_steps, batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
y = vs.get_variable("y", [time_steps, batch_size, cell_size])
# Output from the basic GRU cell implementation.
with tf.variable_scope("basic", initializer=initializer):
cell = tf.nn.rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
sess.run([tf.global_variables_initializer()])
cost = tf.reduce_mean(tf.square(outputs_dynamic - y))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
cost)
# time for a training step.
basic_time_training = time_taken_by_op(optimizer, sess, iters)
      # Output from the block GRU cell implementation.
with tf.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
sess.run([tf.global_variables_initializer()])
cost = tf.reduce_mean(tf.square(outputs_dynamic - y))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
cost)
# time for a training step.
block_time_training = time_taken_by_op(optimizer, sess, iters)
performance_training = (
basic_time_training - block_time_training) * 100 / basic_time_training
print(",".join([str(batch_size), str(cell_size), str(input_size), str(
time_steps), str(use_gpu), str(basic_time_training), str(
block_time_training), str(performance_training)]))
return basic_time_training, block_time_training
def inference_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
time_steps,
use_gpu=False,
iters=30):
"""Benchmark inference speed between GRUBlockCell vs GRUCell."""
tf.reset_default_graph()
with tf.Session(graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else "/gpu:0"):
# Random initializers.
seed = 1994
initializer = tf.random_uniform_initializer(-1, 1, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = vs.get_variable("concat_x",
[time_steps, batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
# Output from the basic GRU cell implementation.
with tf.variable_scope("basic", initializer=initializer):
cell = tf.nn.rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
sess.run([tf.global_variables_initializer()])
basic_time_inference = time_taken_by_op(outputs_dynamic, sess, iters)
# Output from the block GRU cell implementation.
with tf.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = tf.nn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=tf.float32)
sess.run([tf.global_variables_initializer()])
block_time_inference = time_taken_by_op(outputs_dynamic, sess, iters)
performance_inference = (basic_time_inference - block_time_inference
) * 100 / basic_time_inference
print(",".join([str(batch_size), str(cell_size), str(input_size), str(
time_steps), str(use_gpu), str(basic_time_inference), str(
block_time_inference), str(performance_inference)]))
return basic_time_inference, block_time_inference
def single_bprop_step_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
use_gpu=False,
iters=30):
"""Benchmark single bprop step speed between GRUBlockCell vs GRUCell."""
tf.reset_default_graph()
with tf.Session(graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else "/gpu:0"):
initializer = tf.random_uniform_initializer(-1, 1, seed=1989)
# Inputs
x = vs.get_variable("x", [batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
# Output from the basic GRU cell implementation.
with tf.variable_scope("basic", initializer=initializer):
output = tf.nn.rnn_cell.GRUCell(cell_size)(tf.identity(x),
tf.identity(h))
sess.run([tf.global_variables_initializer()])
grad_output_wrt_input = tf.gradients([output], h)
basic_time_bprop = time_taken_by_op(grad_output_wrt_input, sess, iters)
# Output from the block GRU cell implementation.
with tf.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(tf.identity(x), tf.identity(h))
sess.run([tf.global_variables_initializer()])
grad_output_wrt_input = tf.gradients([output], h)
block_time_bprop = time_taken_by_op(grad_output_wrt_input, sess, iters)
performance_inference = (
basic_time_bprop - block_time_bprop) * 100 / basic_time_bprop
print(",".join([str(batch_size), str(cell_size), str(input_size), str(
use_gpu), str(basic_time_bprop), str(block_time_bprop), str(
performance_inference)]))
return basic_time_bprop, block_time_bprop
class BenchmarkGRUBlock(tf.test.Benchmark):
def benchmarkTrainingBlockGRUVsGRUCell(self):
print("Comparison GRUBlockCell vs GRUCell")
print("--------------------------------------------------------------")
print("Training speed GRUBlockCell vs GRUCell")
print("batch_size, cell_size, input_size, time_steps, GPU, "
"basic_time_training, block_time_training, performance_training[%]")
iters = 10
for use_gpu in [True, False]:
for batch_size in [1, 32, 128]:
for cell_size in [128, 512]:
for input_size in [128, 512]:
for time_steps in [50]:
basic_time, block_time = training_gru_block_vs_gru_cell(
batch_size, cell_size, input_size, time_steps, use_gpu, iters)
self.report_benchmark(
name="GRUCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=block_time)
def benchmarkInferenceBlockGRUVsGRUCell(self):
print("--------------------------------------------------------------")
print("Inference speed GRUBlockCell vs GRUCell")
print(
"batch_size, cell_size, input_size, time_steps, GPU, "
"basic_time_inference, block_time_inference, performance_inference[%]")
iters = 10
for use_gpu in [True, False]:
for batch_size in [1, 32, 128]:
for cell_size in [128, 512]:
for input_size in [128, 512]:
for time_steps in [50]:
basic_time, block_time = inference_gru_block_vs_gru_cell(
batch_size, cell_size, input_size, time_steps, use_gpu, iters)
self.report_benchmark(
name="GRUCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s"
% (batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=block_time)
def benchmarkSingleBpropStepBlockGRUVsGRUCell(self):
print("--------------------------------------------------------------")
print("Single bprop step speed GRUBlockCell vs GRUCell")
print("batch_size, cell_size, input_size, GPU, basic_time, "
"block_time, performance_inference[%]")
iters = 10
for use_gpu in [True, False]:
for batch_size in [1, 32, 128]:
for cell_size in [128, 512]:
for input_size in [128, 512]:
basic_time, block_time = single_bprop_step_gru_block_vs_gru_cell(
batch_size, cell_size, input_size, use_gpu, iters)
self.report_benchmark(
name="GRUCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s" %
(batch_size, cell_size, input_size, use_gpu),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s"
% (batch_size, cell_size, input_size, use_gpu),
iters=iters,
wall_time=block_time)
print("--------------------------------------------------------------")
if __name__ == "__main__":
tf.test.main()
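# Added usage note (not part of the original file). Under the standard TensorFlow
# test harness started by tf.test.main(), the unit tests run by default and the
# tf.test.Benchmark subclass above is selected via the usual --benchmarks flag
# (verify the exact flag against your TensorFlow version), e.g.:
#
#   python gru_ops_test.py
#   python gru_ops_test.py --benchmarks=BenchmarkGRUBlock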
| apache-2.0 | -3,607,559,223,027,243,000 | 39.150259 | 80 | 0.573407 | false |
lizquilty/roboticarm | commandline.py | 1 | 1269 | #!/usr/bin/python
# Unsure who wrote this code, found it on a website and cleaned up the wikihow page
# at http://www.wikihow.com/Use-a-USB-Robotic-Arm-with-a-Raspberry-Pi-(Maplin)
# - Liz Quilty
#ROBOT ARM CONTROL PROGRAM
#import the USB and Time librarys into Python
import usb.core, usb.util, time
#Allocate the name 'RoboArm' to the USB device
RoboArm = usb.core.find(idVendor=0x1267, idProduct=0x000)
#Check if the arm is detected and warn if not
if RoboArm is None:
raise ValueError("Arm not found")
#Create a variable for duration
Duration=1
#Define a procedure to execute each movement
def MoveArm(Duration, ArmCmd):
#Start the movement
RoboArm.ctrl_transfer(0x40,6,0x100,0,ArmCmd,1000)
#Stop the movement after waiting a specified duration
time.sleep(Duration)
ArmCmd=[0,0,0]
RoboArm.ctrl_transfer(0x40,6,0x100,0,ArmCmd,1000)
MoveArm(1,[0,1,0]) #Rotate base anti-clockwise
MoveArm(1,[0,2,0]) #Rotate base clockwise
MoveArm(1,[64,0,0]) #Shoulder up
MoveArm(1,[128,0,0]) #Shoulder down
MoveArm(1,[16,0,0]) #Elbow up
MoveArm(1,[32,0,0]) #Elbow down
MoveArm(1,[4,0,0]) #Wrist up
MoveArm(1,[8,0,0]) # Wrist down
MoveArm(1,[2,0,0]) #Grip open
MoveArm(1,[1,0,0]) #Grip close
MoveArm(1,[0,0,1]) #Light on
MoveArm(1,[0,0,0]) #Light off
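#Added sketch, not from the original script: each ArmCmd is [joints, base, light]
#and the joint byte appears to be a bitmask, so (assumption) several motions can be
#combined by OR-ing the values used above, e.g. shoulder up + elbow up + light on:
#MoveArm(1,[64|16,0,1])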
| unlicense | -6,164,347,019,059,424,000 | 32.394737 | 84 | 0.719464 | false |
MrTheodor/espressopp | contrib/mpi4py/mpi4py-2.0.0/demo/osu_allgather.py | 8 | 1804 | # http://mvapich.cse.ohio-state.edu/benchmarks/
from mpi4py import MPI
def osu_bcast(
BENCHMARH = "MPI Allgather Latency Test",
skip = 1000,
loop = 10000,
skip_large = 10,
loop_large = 100,
large_message_size = 8192,
MAX_MSG_SIZE = 1<<20,
):
comm = MPI.COMM_WORLD
myid = comm.Get_rank()
numprocs = comm.Get_size()
if numprocs < 2:
if myid == 0:
errmsg = "This test requires at least two processes"
else:
errmsg = None
raise SystemExit(errmsg)
r_buf = allocate(MAX_MSG_SIZE*numprocs)
if myid == 0:
print ('# %s' % (BENCHMARH,))
if myid == 0:
print ('# %-8s%20s' % ("Size [B]", "Latency [us]"))
for size in message_sizes(MAX_MSG_SIZE):
if size > large_message_size:
skip = skip_large
loop = loop_large
iterations = list(range(loop+skip))
s_msg = MPI.IN_PLACE
r_msg = [r_buf, size, MPI.BYTE]
#
comm.Barrier()
for i in iterations:
if i == skip:
t_start = MPI.Wtime()
comm.Allgather(s_msg, r_msg)
t_end = MPI.Wtime()
comm.Barrier()
#
if myid == 0:
latency = (t_end - t_start) * 1e6 / loop
print ('%-10d%20.2f' % (size, latency))
def message_sizes(max_size):
return [0] + [(1<<i) for i in range(30)
if (1<<i) <= max_size]
def allocate(n):
try:
import mmap
return mmap.mmap(-1, n)
except (ImportError, EnvironmentError):
try:
from numpy import zeros
return zeros(n, 'B')
except ImportError:
from array import array
return array('B', [0]) * n
if __name__ == '__main__':
osu_bcast()
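# Added note (assumption, not part of the upstream demo): run this benchmark under
# an MPI launcher with at least two ranks, e.g.
#   mpiexec -n 4 python osu_allgather.py
# All ranks take part in the Allgather; rank 0 prints one latency line per message size.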
| gpl-3.0 | -1,122,397,448,340,107,100 | 24.055556 | 64 | 0.508315 | false |
renesugar/arrow | python/pyarrow/tests/test_cuda_numba_interop.py | 11 | 8730 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import pyarrow as pa
import numpy as np
dtypes = ['uint8', 'int16', 'float32']
cuda = pytest.importorskip("pyarrow.cuda")
nb_cuda = pytest.importorskip("numba.cuda")
from numba.cuda.cudadrv.devicearray import DeviceNDArray # noqa: E402
context_choices = None
context_choice_ids = ['pyarrow.cuda', 'numba.cuda']
def setup_module(module):
np.random.seed(1234)
ctx1 = cuda.Context()
nb_ctx1 = ctx1.to_numba()
nb_ctx2 = nb_cuda.current_context()
ctx2 = cuda.Context.from_numba(nb_ctx2)
module.context_choices = [(ctx1, nb_ctx1), (ctx2, nb_ctx2)]
def teardown_module(module):
del module.context_choices
@pytest.mark.parametrize("c", range(len(context_choice_ids)),
ids=context_choice_ids)
def test_context(c):
ctx, nb_ctx = context_choices[c]
assert ctx.handle == nb_ctx.handle.value
assert ctx.handle == ctx.to_numba().handle.value
ctx2 = cuda.Context.from_numba(nb_ctx)
assert ctx.handle == ctx2.handle
size = 10
buf = ctx.new_buffer(size)
assert ctx.handle == buf.context.handle
def make_random_buffer(size, target='host', dtype='uint8', ctx=None):
"""Return a host or device buffer with random data.
"""
dtype = np.dtype(dtype)
if target == 'host':
assert size >= 0
buf = pa.allocate_buffer(size*dtype.itemsize)
arr = np.frombuffer(buf, dtype=dtype)
arr[:] = np.random.randint(low=0, high=255, size=size,
dtype=np.uint8)
return arr, buf
elif target == 'device':
arr, buf = make_random_buffer(size, target='host', dtype=dtype)
dbuf = ctx.new_buffer(size * dtype.itemsize)
dbuf.copy_from_host(buf, position=0, nbytes=buf.size)
return arr, dbuf
raise ValueError('invalid target value')
@pytest.mark.parametrize("c", range(len(context_choice_ids)),
ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
@pytest.mark.parametrize("size", [0, 1, 8, 1000])
def test_from_object(c, dtype, size):
ctx, nb_ctx = context_choices[c]
arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
# Creating device buffer from numba DeviceNDArray:
darr = nb_cuda.to_device(arr)
cbuf2 = ctx.buffer_from_object(darr)
assert cbuf2.size == cbuf.size
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
np.testing.assert_equal(arr, arr2)
# Creating device buffer from a slice of numba DeviceNDArray:
if size >= 8:
# 1-D arrays
for s in [slice(size//4, None, None),
slice(size//4, -(size//4), None)]:
cbuf2 = ctx.buffer_from_object(darr[s])
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
np.testing.assert_equal(arr[s], arr2)
# cannot test negative strides due to numba bug, see its issue 3705
if 0:
rdarr = darr[::-1]
cbuf2 = ctx.buffer_from_object(rdarr)
assert cbuf2.size == cbuf.size
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
np.testing.assert_equal(arr, arr2)
with pytest.raises(ValueError,
match=('array data is non-contiguous')):
ctx.buffer_from_object(darr[::2])
# a rectangular 2-D array
s1 = size//4
s2 = size//s1
assert s1 * s2 == size
cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2))
assert cbuf2.size == cbuf.size
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
np.testing.assert_equal(arr, arr2)
with pytest.raises(ValueError,
match=('array data is non-contiguous')):
ctx.buffer_from_object(darr.reshape(s1, s2)[:, ::2])
# a 3-D array
s1 = 4
s2 = size//8
s3 = size//(s1*s2)
assert s1 * s2 * s3 == size
cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2, s3))
assert cbuf2.size == cbuf.size
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
np.testing.assert_equal(arr, arr2)
with pytest.raises(ValueError,
match=('array data is non-contiguous')):
ctx.buffer_from_object(darr.reshape(s1, s2, s3)[::2])
# Creating device buffer from am object implementing cuda array
# interface:
class MyObj:
def __init__(self, darr):
self.darr = darr
@property
def __cuda_array_interface__(self):
return self.darr.__cuda_array_interface__
cbuf2 = ctx.buffer_from_object(MyObj(darr))
assert cbuf2.size == cbuf.size
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
np.testing.assert_equal(arr, arr2)
@pytest.mark.parametrize("c", range(len(context_choice_ids)),
ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_numba_memalloc(c, dtype):
ctx, nb_ctx = context_choices[c]
dtype = np.dtype(dtype)
# Allocate memory using numba context
# Warning: this will not be reflected in pyarrow context manager
    # (e.g. bytes_allocated does not change)
size = 10
mem = nb_ctx.memalloc(size * dtype.itemsize)
darr = DeviceNDArray((size,), (dtype.itemsize,), dtype, gpu_data=mem)
darr[:5] = 99
darr[5:] = 88
np.testing.assert_equal(darr.copy_to_host()[:5], 99)
np.testing.assert_equal(darr.copy_to_host()[5:], 88)
# wrap numba allocated memory with CudaBuffer
cbuf = cuda.CudaBuffer.from_numba(mem)
arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
np.testing.assert_equal(arr2, darr.copy_to_host())
@pytest.mark.parametrize("c", range(len(context_choice_ids)),
ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_pyarrow_memalloc(c, dtype):
ctx, nb_ctx = context_choices[c]
size = 10
arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
# wrap CudaBuffer with numba device array
mem = cbuf.to_numba()
darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
np.testing.assert_equal(darr.copy_to_host(), arr)
@pytest.mark.parametrize("c", range(len(context_choice_ids)),
ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_numba_context(c, dtype):
ctx, nb_ctx = context_choices[c]
size = 10
with nb_cuda.gpus[0]:
arr, cbuf = make_random_buffer(size, target='device',
dtype=dtype, ctx=ctx)
assert cbuf.context.handle == nb_ctx.handle.value
mem = cbuf.to_numba()
darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
np.testing.assert_equal(darr.copy_to_host(), arr)
darr[0] = 99
cbuf.context.synchronize()
arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
assert arr2[0] == 99
@pytest.mark.parametrize("c", range(len(context_choice_ids)),
ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_pyarrow_jit(c, dtype):
ctx, nb_ctx = context_choices[c]
@nb_cuda.jit
def increment_by_one(an_array):
pos = nb_cuda.grid(1)
if pos < an_array.size:
an_array[pos] += 1
# applying numba.cuda kernel to memory hold by CudaBuffer
size = 10
arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
threadsperblock = 32
blockspergrid = (arr.size + (threadsperblock - 1)) // threadsperblock
mem = cbuf.to_numba()
darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
increment_by_one[blockspergrid, threadsperblock](darr)
cbuf.context.synchronize()
arr1 = np.frombuffer(cbuf.copy_to_host(), dtype=arr.dtype)
np.testing.assert_equal(arr1, arr + 1)
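# Added sketch (not part of the original test suite): the zero-copy round trip the
# tests above exercise, condensed into one helper. Assumes a CUDA-capable GPU and
# the imports and helpers defined earlier in this file.
def _interop_roundtrip_sketch(ctx, size=16, dtype='uint8'):
    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
    # Wrap the pyarrow CudaBuffer as a numba DeviceNDArray without copying.
    darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=cbuf.to_numba())
    # Both views refer to the same device memory, so their host copies agree.
    host_from_buf = np.frombuffer(cbuf.copy_to_host(), dtype=arr.dtype)
    np.testing.assert_equal(host_from_buf, darr.copy_to_host())
    return host_from_buf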
| apache-2.0 | -928,748,390,832,582,100 | 36.148936 | 79 | 0.632646 | false |
tstenner/bleachbit | tests/TestGUI.py | 1 | 5872 | # vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2008-2020 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module GUI
"""
import os
import time
import types
import unittest
os.environ['LANGUAGE'] = 'en'
try:
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from bleachbit.GUI import Bleachbit
HAVE_GTK = True
except ImportError:
HAVE_GTK = False
import bleachbit
from bleachbit import _
from bleachbit.GuiPreferences import PreferencesDialog
from bleachbit.Options import options, Options
from tests import common
bleachbit.online_update_notification_enabled = False
@unittest.skipUnless(HAVE_GTK, 'requires GTK+ module')
class GUITestCase(common.BleachbitTestCase):
app = Bleachbit(auto_exit=False, uac=False)
options_get_tree = options.get_tree
"""Test case for module GUI"""
@classmethod
def setUpClass(cls):
super(GUITestCase, GUITestCase).setUpClass()
options.set('first_start', False)
options.set('check_online_updates', False) # avoid pop-up window
options.get_tree = types.MethodType(lambda self, parent, child: False, options)
cls.app.register()
cls.app.activate()
cls.refresh_gui()
@classmethod
def tearDownClass(cls):
super(GUITestCase, GUITestCase).tearDownClass()
options.get_tree = cls.options_get_tree
@classmethod
def refresh_gui(cls, delay=0):
while Gtk.events_pending():
Gtk.main_iteration_do(blocking=False)
time.sleep(delay)
@classmethod
def print_widget(cls, widget, indent=0):
print('{}{}'.format(' ' * indent, widget))
if isinstance(widget, Gtk.Container):
for c in widget.get_children():
                cls.print_widget(c, indent + 2)
@classmethod
def find_widget(cls, widget, widget_class, widget_label=None):
if isinstance(widget, widget_class):
if widget_label is None or widget.get_label() == widget_label:
return widget
if isinstance(widget, Gtk.Container):
for c in widget.get_children():
b = cls.find_widget(c, widget_class, widget_label)
if b is not None:
return b
return None
def click_button(self, dialog, label):
b = self.find_widget(dialog, Gtk.Button, label)
self.assertIsNotNone(b)
b.clicked()
self.refresh_gui()
def test_GUI(self):
"""Unit test for class GUI"""
# there should be no crashes
# app.do_startup()
# pp.do_activate() Build a unit test that that does this
gui = self.app._window
gui.update_progress_bar(0.0)
gui.update_progress_bar(1.0)
gui.update_progress_bar("status")
def test_preferences(self):
"""Opens the preferences dialog and closes it"""
# show preferences dialog
pref = self.app.get_preferences_dialog()
pref.dialog.show_all()
self.refresh_gui()
# click close button
self.click_button(pref.dialog, Gtk.STOCK_CLOSE)
# destroy
pref.dialog.destroy()
def test_diagnostics(self):
"""Opens the diagnostics dialog and closes it"""
dialog, txt = self.app.get_diagnostics_dialog()
dialog.show_all()
self.refresh_gui()
# click close button
self.click_button(dialog, Gtk.STOCK_CLOSE)
# destroy
dialog.destroy()
def test_about(self):
"""Opens the about dialog and closes it"""
about = self.app.get_about_dialog()
about.show_all()
self.refresh_gui()
# destroy
about.destroy()
def test_preview(self):
"""Select cleaner option and clicks preview button"""
gui = self.app._window
self.refresh_gui()
model = gui.view.get_model()
tree = self.find_widget(gui, Gtk.TreeView)
self.assertIsNotNone(tree)
def get_iter(model, cleaner):
it = model.get_iter(Gtk.TreePath(0))
while it:
if model[it][2] == cleaner:
return model.iter_children(it)
it = model.iter_next(it)
return None
def find_option(model, cleaner, option):
it = get_iter(model, cleaner)
self.assertIsNotNone(it)
while it:
if model[it][2] == option:
return it
it = model.iter_next(it)
return None
it = find_option(model, 'system', 'tmp')
self.assertIsNotNone(it)
tree.scroll_to_cell(model.get_path(it), None, False, 0, 0)
self.refresh_gui()
model[model.iter_parent(it)][1] = True
model[it][1] = True
self.refresh_gui()
b = self.click_button(gui, _("Preview"))
self.refresh_gui()
@unittest.skipIf(os.getenv('TRAVIS', 'f') == 'true', 'Not supported on Travis CI')
def test_notify(self):
"""Test a pop-up notification"""
from bleachbit.GUI import notify
notify('This is a test notification')
import time
time.sleep(1)
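# Added note (assumption, not part of the original file): these tests drive a real
# GTK window, so they need a display. On a headless machine they are commonly run
# under a virtual framebuffer, e.g.:
#   xvfb-run -a python -m unittest tests.TestGUI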
| gpl-3.0 | 1,718,997,126,102,214,100 | 29.905263 | 91 | 0.611887 | false |
milin/faceguard | facebook/views.py | 1 | 6332 | import requests
import simplejson as json
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.shortcuts import (
HttpResponse,
HttpResponseRedirect,
render_to_response,
)
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.views.generic import TemplateView
from facebook.forms import BlackListWordsForm
from facebook.models import (
BlackListedWords,
FacebookUser,
DeletedComments
)
from pyfb import Pyfb
from django.conf import settings
def facebook_login(request):
# Gets the auth redirect url with code provided from facebook.
facebook = Pyfb(
settings.CLIENT_ID,
permissions=settings.FACEBOOK_SCOPE
)
auth_code_url = facebook.get_auth_code_url(
redirect_uri=settings.REDIRECT_URL
)
return HttpResponseRedirect(auth_code_url)
def facebook_login_success(request):
code = request.GET.get('code')
facebook = Pyfb(settings.CLIENT_ID)
access_token = facebook.get_access_token(
settings.CLIENT_APP_SECRET,
code,
redirect_uri=settings.REDIRECT_URL
)
me = facebook.get_myself()
try:
fb_user = FacebookUser.objects.get(email_address=me.email)
fb_user.access_token = access_token
fb_user.save()
user = fb_user.user
except FacebookUser.DoesNotExist:
user = User.objects.create(
username=me.email,
first_name=me.first_name,
last_name=me.last_name,
email=me.email
)
user.set_password(me.email)
user.save()
fb_user = FacebookUser.objects.create(
first_name=me.first_name,
last_name=me.last_name,
access_token=access_token,
email_address=me.email,
username=me.email,
user=user,
)
user = authenticate(
username=me.email,
password=me.email
)
# log the user in
login(request, user)
return HttpResponseRedirect(reverse('blacklist_words'))
class Facebook(TemplateView):
feed_url = 'https://graph.facebook.com/me/feed'
delete_url = 'https://graph.facebook.com/v2.3/{}'
feed = None
def __init__(self):
self.blacklist_comments = []
super(Facebook, self).__init__()
def signed_url(self, url, access_token):
return '{}?access_token={}'.format(url, access_token)
def get_feed(self, access_token=None):
response = requests.get(self.signed_url(self.feed_url, access_token))
self.feed = json.loads(response.content)['data']
return self.feed
def get_comments_having_blacklisted_words(self, feeds, user):
for feed in feeds:
self.get_blacklisted_comments_from_post(feed, user)
def get_blacklisted_comments_from_post(self, feed, user):
fb_user = FacebookUser.objects.get(user=user)
blacklist_words = [bl.word for bl in BlackListedWords.objects.filter(user=fb_user)]
try:
for comment in feed['comments']['data']:
if set(comment['message'].split(' ')).intersection(set(blacklist_words)):
self.blacklist_comments.append(comment)
except Exception as e:
print e
def get(self, request):
user = request.user
fb_user = FacebookUser.objects.get(user=user)
self.start_process(fb_user)
return HttpResponse(json.dumps(self.blacklist_comments))
def start_process(self, fb_user):
feeds = self.get_feed(access_token=fb_user.access_token)
self.get_comments_having_blacklisted_words(feeds, fb_user.user)
self.delete_them(fb_user)
def delete_them(self, fb_user):
access_token = fb_user.access_token
for comment in self.blacklist_comments:
response = requests.delete(self.signed_url(
self.delete_url.format(comment['id']),
access_token
))
if response.status_code > 200:
self.store_comment_to_be_deleted(fb_user, comment)
# Send email to user
send_mail(
'Facebook blacklist comment deleted',
'This message was deleted:\n {}'.format(comment['message']),
settings.SENDER_EMAIL,
[fb_user.user.email],
)
# Send email to admin
send_mail(
'Facebook blacklist comment deleted',
'This message was deleted:\n {}'.format(comment['message']),
settings.SENDER_EMAIL,
[settings.SENDER_EMAIL],
)
else:
return HttpResponse(response.content)
def store_comment_to_be_deleted(self, fb_user, comment):
"""
        Store the comment to be deleted and its metadata for reviewing
purposes.
"""
DeletedComments.objects.create(
message=comment['message'],
message_by=comment['from']['name'],
message_id=comment['id'],
user=fb_user,
)
@login_required
def blacklist_words(request):
form = BlackListWordsForm(request.user)
try:
fb_user = FacebookUser.objects.get(user=request.user)
except FacebookUser.DoesNotExist:
# TODO Proper 404
HttpResponse("Not Found")
if request.method == 'GET':
initial = BlackListedWords.objects.filter(user=fb_user)
initial_words = []
if initial:
initial_words = [i.word for i in initial]
request_context = RequestContext(request)
request_context.push({
'form': form,
'user': request.user,
'initial_words': initial_words
})
return render_to_response('blacklist_words.html', request_context)
else:
form = BlackListWordsForm(request.user, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('blacklist_words'))
else:
return HttpResponse(form.errors)
def homepage(request):
return render_to_response('homepage.html')
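# Added sketch (hypothetical wiring, not part of the original app): how these views
# might be hooked into a urls.py. Only the 'blacklist_words' name is something the
# code above actually relies on (via reverse()); the other routes are illustrative.
#
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^login/$', facebook_login),
#     url(r'^login/success/$', facebook_login_success),
#     url(r'^blacklist/$', blacklist_words, name='blacklist_words'),
#     url(r'^scan/$', Facebook.as_view()),
#     url(r'^$', homepage),
# ]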
| mit | 2,414,990,138,891,924,500 | 32.151832 | 91 | 0.611339 | false |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/crud/selecting.py | 1 | 2688 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module provide utility functions for select operation.
"""
from sqlalchemy import select, func, Column
try:
from ..pkg.prettytable import from_db_cursor
except: # pragma: no cover
from sqlalchemy_mate.pkg.prettytable import from_db_cursor
def count_row(engine, table):
"""
Return number of rows in a table.
Example::
>>> count_row(engine, table_user)
3
    **Chinese docs (translated)**
    Returns the number of rows in the table.
"""
return engine.execute(table.count()).fetchone()[0]
def select_all(engine, table):
"""
Select everything from a table.
Example::
>>> list(select_all(engine, table_user))
[(1, "Alice"), (2, "Bob"), (3, "Cathy")]
    **Chinese docs (translated)**
    Selects all of the data from the table.
"""
s = select([table])
return engine.execute(s)
def select_single_column(engine, column):
"""
Select data from single column.
Example::
>>> select_single_column(engine, table_user.c.id)
[1, 2, 3]
>>> select_single_column(engine, table_user.c.name)
["Alice", "Bob", "Cathy"]
"""
s = select([column])
return column.name, [row[0] for row in engine.execute(s)]
def select_many_column(engine, *columns):
"""
Select data from multiple columns.
Example::
>>> select_many_column(engine, table_user.c.id, table_user.c.name)
:param columns: list of sqlalchemy.Column instance
:returns headers: headers
:returns data: list of row
    **Chinese docs (translated)**
    Returns the data from multiple columns.
"""
if isinstance(columns[0], Column):
pass
elif isinstance(columns[0], (list, tuple)):
columns = columns[0]
s = select(columns)
headers = [str(column) for column in columns]
data = [tuple(row) for row in engine.execute(s)]
return headers, data
def select_distinct_column(engine, *columns):
"""
Select distinct column(columns).
:returns: if single column, return list, if multiple column, return matrix.
    **Chinese docs (translated)**
    A syntactic-sugar function for the DISTINCT statement.
"""
if isinstance(columns[0], Column):
pass
elif isinstance(columns[0], (list, tuple)): # pragma: no cover
columns = columns[0]
s = select(columns).distinct()
if len(columns) == 1:
return [row[0] for row in engine.execute(s)]
else:
return [tuple(row) for row in engine.execute(s)]
def select_random(engine, table_or_columns, limit=5):
"""
Randomly select some rows from table.
"""
s = select(table_or_columns).order_by(func.random()).limit(limit)
return engine.execute(s).fetchall()
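# Added usage sketch (not part of the original module). Assumes an SQLAlchemy
# engine and a ``table_user`` Table like the ones used in the docstrings above:
#
#   n = count_row(engine, table_user)
#   name, values = select_single_column(engine, table_user.c.name)
#   headers, rows = select_many_column(engine, table_user.c.id, table_user.c.name)
#   sample = select_random(engine, [table_user], limit=3)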
| mit | 7,582,632,062,057,396,000 | 20.731092 | 79 | 0.609049 | false |
MacHu-GWU/single_file_module-project | sfm/fingerprint.py | 1 | 6804 | # -*- coding: utf-8 -*-
"""
This module is built on Python standard hashlib, provides utility method
to find hash value for a bytes, a string, a Python object or a file.
Import this module::
>>> from sfm.fingerprint import fingerprint
Example::
>>> fingerprint.of_bytes(bytes(16))
>>> fingerprint.of_text("Hello World")
>>> fingerprint.of_pyobj(dict(a=1, b=2, c=3))
>>> fingerprint.of_file("fingerprint.py")
You can switch the hash algorithm to use::
>>> fingerprint.use("md5") # also "sha1", "sha256", "sha512"
**Chinese docs (translated)**
This module provides convenient shortcuts for computing hash values. For the
of_pyobj() method, note that the same major Python version (2 or 3) must be used
for both writing and reading.
"""
from six import PY2, PY3, text_type, binary_type
import pickle
import hashlib
if PY2: # pragma: no cover
default_pk_protocol = 2
elif PY3: # pragma: no cover
default_pk_protocol = 2
class FingerPrint(object):
"""A hashlib wrapper class allow you to use one line to do hash as you wish.
:type algorithm: str
:param algorithm: default "md5"
Usage::
>>> from sfm.fingerprint import fingerprint
>>> print(fingerprint.of_bytes(bytes(123)))
b1fec41621e338896e2d26f232a6b006
>>> print(fingerprint.of_text("message"))
78e731027d8fd50ed642340b7c9a63b3
>>> print(fingerprint.of_pyobj({"key": "value"}))
4c502ab399c89c8758a2d8c37be98f69
>>> print(fingerprint.of_file("fingerprint.py"))
4cddcb5562cbff652b0e4c8a0300337a
"""
_mapper = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
def __init__(self, algorithm="md5", pk_protocol=default_pk_protocol):
self.hash_algo = hashlib.md5
self.return_int = False
self.pk_protocol = 2
self.use(algorithm)
self.set_return_str()
self.set_pickle_protocol(pk_protocol)
def use(self, algorithm):
"""Change the hash algorithm you gonna use.
"""
try:
self.hash_algo = self._mapper[algorithm.strip().lower()]
except IndexError: # pragma: no cover
template = "'%s' is not supported, try one of %s."
raise ValueError(template % (algorithm, list(self._mapper)))
def use_md5(self):
"""
Use md5 hash algorithm.
"""
self.use("md5")
def use_sha1(self):
"""
Use sha1 hash algorithm.
"""
self.use("sha1")
def use_sha256(self):
"""
Use sha256 hash algorithm.
"""
self.use("sha256")
def use_sha512(self):
"""
Use sha512 hash algorithm.
"""
self.use("sha512")
def digest_to_int(self, digest):
"""Convert hexdigest str to int.
"""
return int(digest, 16)
def set_return_int(self):
"""Set to return hex integer.
"""
self.return_int = True
def set_return_str(self):
"""Set to return hex string.
"""
self.return_int = False
def set_pickle_protocol(self, pk_protocol):
"""Set pickle protocol.
"""
if pk_protocol not in [2, 3]:
raise ValueError("pickle protocol has to be 2 or 3!")
self.pk_protocol = pk_protocol
def set_pickle2(self):
"""
Set pickle protocol to 2.
"""
self.set_pickle_protocol(2)
def set_pickle3(self):
"""
Set pickle protocol to 3.
"""
self.set_pickle_protocol(3)
def digest(self, hash_method):
if self.return_int:
return int(hash_method.hexdigest(), 16)
else:
return hash_method.hexdigest()
# hash function
def of_bytes(self, py_bytes):
"""
Use default hash method to return hash value of bytes.
:type py_bytes: binary_type
:param py_bytes: a binary object
"""
m = self.hash_algo()
m.update(py_bytes)
return self.digest(m)
def of_text(self, text, encoding="utf-8"):
"""
Use default hash method to return hash value of a piece of string
default setting use 'utf-8' encoding.
:type text: text_type
:param text: a text object
"""
m = self.hash_algo()
m.update(text.encode(encoding))
return self.digest(m)
def of_pyobj(self, pyobj):
"""
Use default hash method to return hash value of a piece of Python
picklable object.
:param pyobj: any python object
"""
m = self.hash_algo()
m.update(pickle.dumps(pyobj, protocol=self.pk_protocol))
return self.digest(m)
def of_file(self, abspath, nbytes=0, chunk_size=1024):
"""
Use default hash method to return hash value of a piece of a file
Estimate processing time on:
:type abspath: text_type
:param abspath: the absolute path to the file.
:type nbytes: int
:param nbytes: only has first N bytes of the file. if 0, hash all file.
:type chunk_size: int
:param chunk_size: The max memory we use at one time.
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the meta data (for example, the title, years
information in audio, video) of a multi-media file, then the hash
value gonna also change.
"""
if nbytes < 0:
raise ValueError("chunk_size cannot smaller than 0")
if chunk_size < 1:
raise ValueError("chunk_size cannot smaller than 1")
if (nbytes > 0) and (nbytes < chunk_size):
chunk_size = nbytes
m = self.hash_algo()
with open(abspath, "rb") as f:
if nbytes: # use first n bytes
have_reads = 0
while True:
have_reads += chunk_size
if have_reads > nbytes:
n = nbytes - (have_reads - chunk_size)
if n:
data = f.read(n)
m.update(data)
break
else:
data = f.read(chunk_size)
m.update(data)
else: # use entire content
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
        return self.digest(m)
fingerprint = FingerPrint()
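# Added usage sketch (not part of the original module); the file name below is
# hypothetical. The module-level ``fingerprint`` singleton can switch algorithms
# and return either hex strings or integers:
#
#   fingerprint.use("sha256")
#   fingerprint.of_text("hello")  # hex string digest
#   fingerprint.set_return_int()
#   fingerprint.of_bytes(b"hello")  # same kind of digest, as an integer
#   fingerprint.of_file("data.bin", nbytes=1 << 20)  # hash only the first 1 MiB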
| mit | 2,079,036,333,616,093,000 | 26.412245 | 80 | 0.544818 | false |
reiaaoyama/exabgp | lib/exabgp/configuration/neighbor/parser.py | 3 | 3406 | # encoding: utf-8
"""
neighbor/parser.py
Created by Thomas Mangin on 2014-07-01.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from string import ascii_letters
from string import digits
from exabgp.bgp.message.open.routerid import RouterID
from exabgp.bgp.message.open.holdtime import HoldTime
from exabgp.configuration.parser import string
def inherit (tokeniser):
if len(tokeniser.tokens) == 2:
return [tokeniser()]
if len(tokeniser.tokens) < 4 or tokeniser.tokens[1] != '[' or tokeniser.tokens[-1] != ']':
raise ValueError('invalid inherit list')
return tokeniser.tokens[2:-1]
def hostname (tokeniser):
value = string(tokeniser)
if not value[0].isalnum() or value[0].isdigit():
raise ValueError('bad host-name (alphanumeric)')
if not value[-1].isalnum() or value[-1].isdigit():
raise ValueError('bad host-name (alphanumeric)')
if '..' in value:
raise ValueError('bad host-name (double colon)')
if not all(True if c in ascii_letters + digits + '.-' else False for c in value):
raise ValueError('bad host-name (charset)')
if len(value) > 255:
raise ValueError('bad host-name (length)')
return value
def domainname (tokeniser):
value = string(tokeniser)
if not value:
raise ValueError('bad domain-name')
if not value[0].isalnum() or value[0].isdigit():
raise ValueError('bad domain-name')
if not value[-1].isalnum() or value[-1].isdigit():
raise ValueError('bad domain-name')
if '..' in value:
raise ValueError('bad domain-name')
if not all(True if c in ascii_letters + digits + '.-' else False for c in value):
raise ValueError('bad domain-name')
if len(value) > 255:
raise ValueError('bad domain-name (length)')
return value
def description (tokeniser):
try:
return string(tokeniser)
except:
raise ValueError('bad neighbor description')
def md5 (tokeniser):
value = tokeniser()
if len(value) > 80:
raise ValueError('MD5 password must be no larger than 80 characters')
if not value:
		raise ValueError('md5 requires a password as an argument (quoted or unquoted). FreeBSD users should use "kernel" as the argument.')
return value
def ttl (tokeniser):
value = tokeniser()
try:
attl = int(value)
except ValueError:
if value in ('false','disable','disabled'):
return None
raise ValueError('invalid ttl-security "%s"' % value)
if attl < 0:
raise ValueError('ttl-security can not be negative')
if attl >= 255:
raise ValueError('ttl must be smaller than 256')
return attl
def router_id (tokeniser):
value = tokeniser()
try:
return RouterID(value)
except ValueError:
raise ValueError ('"%s" is an invalid router-id' % value)
def hold_time (tokeniser):
value = tokeniser()
try:
holdtime = HoldTime(int(value))
except ValueError:
raise ValueError ('"%s" is an invalid hold-time' % value)
	if holdtime < 3 and holdtime != 0:
raise ValueError('holdtime must be zero or at least three seconds')
if holdtime > HoldTime.MAX:
raise ValueError('holdtime must be smaller or equal to %d' % HoldTime.MAX)
return holdtime
def processes (tokeniser):
result = []
token = tokeniser()
if token != '[':
raise ValueError('invalid processes, does not starts with [')
while True:
token = tokeniser()
if not token:
raise ValueError('invalid processes, does not ends with ]')
if token == ']':
break
if token == ',':
continue
result.append(token)
return result
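# Illustrative sketch (not part of exabgp): how the validators above are meant
# to be used. The "tokeniser" here is a stand-in callable returning one token.
if __name__ == '__main__':
	print(hold_time(lambda: '180'))  # accepted, returns a HoldTime
	try:
		ttl(lambda: '255')
	except ValueError as exc:
		print(exc)  # rejected: ttl must be smaller than 256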
| bsd-3-clause | 5,191,455,710,211,270,000 | 25.818898 | 144 | 0.704932 | false |
dscho/hg | mercurial/dagparser.py | 2 | 14741 | # dagparser.py - parser and generator for concise description of DAGs
#
# Copyright 2010 Peter Arrenbrecht <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import re
import string
from .i18n import _
from . import error
def parsedag(desc):
'''parses a DAG from a concise textual description; generates events
"+n" is a linear run of n nodes based on the current default parent
"." is a single node based on the current default parent
"$" resets the default parent to -1 (implied at the start);
otherwise the default parent is always the last node created
"<p" sets the default parent to the backref p
"*p" is a fork at parent p, where p is a backref
"*p1/p2/.../pn" is a merge of parents p1..pn, where the pi are backrefs
"/p2/.../pn" is a merge of the preceding node and p2..pn
":name" defines a label for the preceding node; labels can be redefined
"@text" emits an annotation event for text
"!command" emits an action event for the current node
"!!my command\n" is like "!", but to the end of the line
"#...\n" is a comment up to the end of the line
Whitespace between the above elements is ignored.
A backref is either
* a number n, which references the node curr-n, where curr is the current
node, or
* the name of a label you placed earlier using ":name", or
* empty to denote the default parent.
All string valued-elements are either strictly alphanumeric, or must
be enclosed in double quotes ("..."), with "\" as escape character.
Generates sequence of
('n', (id, [parentids])) for node creation
('l', (id, labelname)) for labels on nodes
('a', text) for annotations
('c', command) for actions (!)
('C', command) for line actions (!!)
Examples
--------
Example of a complex graph (output not shown for brevity):
>>> len(list(parsedag("""
...
... +3 # 3 nodes in linear run
... :forkhere # a label for the last of the 3 nodes from above
... +5 # 5 more nodes on one branch
... :mergethis # label again
... <forkhere # set default parent to labeled fork node
... +10 # 10 more nodes on a parallel branch
... @stable # following nodes will be annotated as "stable"
... +5 # 5 nodes in stable
... !addfile # custom command; could trigger new file in next node
... +2 # two more nodes
... /mergethis # merge last node with labeled node
... +4 # 4 more nodes descending from merge node
...
... """)))
34
Empty list:
>>> list(parsedag(""))
[]
A simple linear run:
>>> list(parsedag("+3"))
[('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
Some non-standard ways to define such runs:
>>> list(parsedag("+1+2"))
[('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
>>> list(parsedag("+1*1*"))
[('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
>>> list(parsedag("*"))
[('n', (0, [-1]))]
>>> list(parsedag("..."))
[('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
A fork and a join, using numeric back references:
>>> list(parsedag("+2*2*/2"))
[('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))]
>>> list(parsedag("+2<2+1/2"))
[('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))]
Placing a label:
>>> list(parsedag("+1 :mylabel +1"))
[('n', (0, [-1])), ('l', (0, 'mylabel')), ('n', (1, [0]))]
An empty label (silly, really):
>>> list(parsedag("+1:+1"))
[('n', (0, [-1])), ('l', (0, '')), ('n', (1, [0]))]
Fork and join, but with labels instead of numeric back references:
>>> list(parsedag("+1:f +1:p2 *f */p2"))
[('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')),
('n', (2, [0])), ('n', (3, [2, 1]))]
>>> list(parsedag("+1:f +1:p2 <f +1 /p2"))
[('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')),
('n', (2, [0])), ('n', (3, [2, 1]))]
Restarting from the root:
>>> list(parsedag("+1 $ +1"))
[('n', (0, [-1])), ('n', (1, [-1]))]
Annotations, which are meant to introduce sticky state for subsequent nodes:
>>> list(parsedag("+1 @ann +1"))
[('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))]
>>> list(parsedag('+1 @"my annotation" +1'))
[('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))]
Commands, which are meant to operate on the most recently created node:
>>> list(parsedag("+1 !cmd +1"))
[('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))]
>>> list(parsedag('+1 !"my command" +1'))
[('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))]
>>> list(parsedag('+1 !!my command line\\n +1'))
[('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))]
Comments, which extend to the end of the line:
>>> list(parsedag('+1 # comment\\n+1'))
[('n', (0, [-1])), ('n', (1, [0]))]
Error:
>>> try: list(parsedag('+1 bad'))
... except Exception, e: print e
invalid character in dag description: bad...
'''
if not desc:
return
wordchars = string.ascii_letters + string.digits
labels = {}
p1 = -1
r = 0
def resolve(ref):
if not ref:
return p1
elif ref[0] in string.digits:
return r - int(ref)
else:
return labels[ref]
chiter = (c for c in desc)
def nextch():
return next(chiter, '\0')
def nextrun(c, allow):
s = ''
while c in allow:
s += c
c = nextch()
return c, s
def nextdelimited(c, limit, escape):
s = ''
while c != limit:
if c == escape:
c = nextch()
s += c
c = nextch()
return nextch(), s
def nextstring(c):
if c == '"':
return nextdelimited(nextch(), '"', '\\')
else:
return nextrun(c, wordchars)
c = nextch()
while c != '\0':
while c in string.whitespace:
c = nextch()
if c == '.':
yield 'n', (r, [p1])
p1 = r
r += 1
c = nextch()
elif c == '+':
c, digs = nextrun(nextch(), string.digits)
n = int(digs)
for i in xrange(0, n):
yield 'n', (r, [p1])
p1 = r
r += 1
elif c in '*/':
if c == '*':
c = nextch()
c, pref = nextstring(c)
prefs = [pref]
while c == '/':
c, pref = nextstring(nextch())
prefs.append(pref)
ps = [resolve(ref) for ref in prefs]
yield 'n', (r, ps)
p1 = r
r += 1
elif c == '<':
c, ref = nextstring(nextch())
p1 = resolve(ref)
elif c == ':':
c, name = nextstring(nextch())
labels[name] = p1
yield 'l', (p1, name)
elif c == '@':
c, text = nextstring(nextch())
yield 'a', text
elif c == '!':
c = nextch()
if c == '!':
cmd = ''
c = nextch()
while c not in '\n\r\0':
cmd += c
c = nextch()
yield 'C', cmd
else:
c, cmd = nextstring(c)
yield 'c', cmd
elif c == '#':
while c not in '\n\r\0':
c = nextch()
elif c == '$':
p1 = -1
c = nextch()
elif c == '\0':
return # in case it was preceded by whitespace
else:
s = ''
i = 0
while c != '\0' and i < 10:
s += c
i += 1
c = nextch()
raise error.Abort(_('invalid character in dag description: '
'%s...') % s)
def dagtextlines(events,
addspaces=True,
wraplabels=False,
wrapannotations=False,
wrapcommands=False,
wrapnonlinear=False,
usedots=False,
maxlinewidth=70):
'''generates single lines for dagtext()'''
def wrapstring(text):
if re.match("^[0-9a-z]*$", text):
return text
return '"' + text.replace('\\', '\\\\').replace('"', '\"') + '"'
def gen():
labels = {}
run = 0
wantr = 0
needroot = False
for kind, data in events:
if kind == 'n':
r, ps = data
# sanity check
if r != wantr:
raise error.Abort(_("expected id %i, got %i") % (wantr, r))
if not ps:
ps = [-1]
else:
for p in ps:
if p >= r:
raise error.Abort(_("parent id %i is larger than "
"current id %i") % (p, r))
wantr += 1
# new root?
p1 = r - 1
if len(ps) == 1 and ps[0] == -1:
if needroot:
if run:
yield '+' + str(run)
run = 0
if wrapnonlinear:
yield '\n'
yield '$'
p1 = -1
else:
needroot = True
if len(ps) == 1 and ps[0] == p1:
if usedots:
yield "."
else:
run += 1
else:
if run:
yield '+' + str(run)
run = 0
if wrapnonlinear:
yield '\n'
prefs = []
for p in ps:
if p == p1:
prefs.append('')
elif p in labels:
prefs.append(labels[p])
else:
prefs.append(str(r - p))
yield '*' + '/'.join(prefs)
else:
if run:
yield '+' + str(run)
run = 0
if kind == 'l':
rid, name = data
labels[rid] = name
yield ':' + name
if wraplabels:
yield '\n'
elif kind == 'c':
yield '!' + wrapstring(data)
if wrapcommands:
yield '\n'
elif kind == 'C':
yield '!!' + data
yield '\n'
elif kind == 'a':
if wrapannotations:
yield '\n'
yield '@' + wrapstring(data)
elif kind == '#':
yield '#' + data
yield '\n'
else:
raise error.Abort(_("invalid event type in dag: %s")
% str((type, data)))
if run:
yield '+' + str(run)
line = ''
for part in gen():
if part == '\n':
if line:
yield line
line = ''
else:
if len(line) + len(part) >= maxlinewidth:
yield line
line = ''
elif addspaces and line and part != '.':
line += ' '
line += part
if line:
yield line
def dagtext(dag,
addspaces=True,
wraplabels=False,
wrapannotations=False,
wrapcommands=False,
wrapnonlinear=False,
usedots=False,
maxlinewidth=70):
'''generates lines of a textual representation for a dag event stream
events should generate what parsedag() does, so:
('n', (id, [parentids])) for node creation
('l', (id, labelname)) for labels on nodes
('a', text) for annotations
('c', text) for commands
('C', text) for line commands ('!!')
('#', text) for comment lines
Parent nodes must come before child nodes.
Examples
--------
Linear run:
>>> dagtext([('n', (0, [-1])), ('n', (1, [0]))])
'+2'
Two roots:
>>> dagtext([('n', (0, [-1])), ('n', (1, [-1]))])
'+1 $ +1'
Fork and join:
>>> dagtext([('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])),
... ('n', (3, [2, 1]))])
'+2 *2 */2'
Fork and join with labels:
>>> dagtext([('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])),
... ('l', (1, 'p2')), ('n', (2, [0])), ('n', (3, [2, 1]))])
'+1 :f +1 :p2 *f */p2'
Annotations:
>>> dagtext([('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))])
'+1 @ann +1'
>>> dagtext([('n', (0, [-1])),
... ('a', 'my annotation'),
... ('n', (1, [0]))])
'+1 @"my annotation" +1'
Commands:
>>> dagtext([('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))])
'+1 !cmd +1'
>>> dagtext([('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))])
'+1 !"my command" +1'
>>> dagtext([('n', (0, [-1])),
... ('C', 'my command line'),
... ('n', (1, [0]))])
'+1 !!my command line\\n+1'
Comments:
>>> dagtext([('n', (0, [-1])), ('#', ' comment'), ('n', (1, [0]))])
'+1 # comment\\n+1'
>>> dagtext([])
''
Combining parsedag and dagtext:
>>> dagtext(parsedag('+1 :f +1 :p2 *f */p2'))
'+1 :f +1 :p2 *f */p2'
'''
return "\n".join(dagtextlines(dag,
addspaces,
wraplabels,
wrapannotations,
wrapcommands,
wrapnonlinear,
usedots,
maxlinewidth))
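# Round-trip sketch (not part of Mercurial itself): for a well-formed
# description, dagtext() reproduces the text that parsedag() consumed,
# as the doctests above already demonstrate.
if __name__ == '__main__':
    _sample = '+1 :f +1 :p2 *f */p2'
    assert dagtext(parsedag(_sample)) == _sample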
| gpl-2.0 | -230,478,098,196,292,350 | 29.710417 | 80 | 0.395631 | false |
JustinHop/volti | src/volti/gtk3/main.py | 2 | 12148 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Milan Nikolic <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from subprocess import Popen, PIPE
from signal import SIGTERM
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from dbus.exceptions import DBusException
try:
from volti.defs import *
from volti.dbusservice import DBusService
from volti.utils import log, which, find_term, get_pid_app, get_icon_name
from volti.gtk3.scale import VolumeScale
from volti.gtk3.menu import PopupMenu
from volti.gtk3.preferences import Preferences
except ImportError:
sys.stderr.write("Can't import application modules\nExiting\n")
sys.exit(1)
if HAS_PYALSA:
from volti.alsactl import PyAlsaControl as AlsaControl
elif HAS_PYALSAAUDIO:
from volti.alsaaudioctl import PyAlsaAudioControl as AlsaControl
else:
sys.stderr.write("This program needs pyalsa 1.0.23 or pyalsaaudio 0.6\nExiting\n")
sys.exit(1)
class VolumeTray(Gtk.StatusIcon):
""" GTK+ application for controlling audio volume
from system tray/notification area """
def __init__(self):
""" Constructor """
Gtk.StatusIcon.__init__(self)
self.init_prefs()
self.lock = False
self.lockid = None
self.notify = None
self.key_press = False
self.keys_events = None
self.pid_app = get_pid_app()
self.alsactrl = AlsaControl(
self.card_index, self.control, self)
self.menu = PopupMenu(self)
self.scale = VolumeScale(self)
self.dbus = DBusService(self)
if self.keys:
self.init_keys_events()
if self.show_notify:
self.init_notify()
self.connect("button_press_event", self.on_button_press_event)
self.connect("scroll_event", self.on_scroll_event)
self.connect("popup_menu", self.on_popup_menu)
# set current volume
self.update(reopen=False)
# watch for changes
fd, eventmask = self.alsactrl.get_descriptors()
self.watchid = GObject.io_add_watch(fd, eventmask, self.update)
def init_prefs(self):
""" Initialize preferences """
self.preferences = Preferences(self)
self.card_index = int(PREFS["card_index"])
self.control = PREFS["control"]
self.toggle = PREFS["toggle"]
self.mixer = PREFS["mixer"]
self.mixer_internal = bool(int(PREFS["mixer_internal"]))
self.icon_theme = PREFS["icon_theme"]
self.show_tooltip = bool(int(PREFS["show_tooltip"]))
self.run_in_terminal = bool(int(PREFS["run_in_terminal"]))
self.scale_increment = float(PREFS["scale_increment"])
self.scale_show_value = bool(int(PREFS["scale_show_value"]))
self.keys = bool(int(PREFS["keys"]))
self.show_notify = bool(int(PREFS["show_notify"]))
self.notify_timeout = float(PREFS["notify_timeout"])
self.notify_position = bool(int(PREFS["notify_position"]))
self.notify_body = PREFS["notify_body"]
def init_keys_events(self):
""" Initialize keys events """
if self.keys_events:
if hasattr(self.keys_events, "stop"):
self.keys_events.stop()
del self.keys_events
self.keys_events = None
if not self.keys:
return
if HAS_XLIB:
try:
from volti.xlibevent import XlibEvent
self.keys_events = XlibEvent(self)
self.keys_events.start()
except Exception, err:
log.exception(str(err))
self.keys_events = None
else:
log.warn("Xlib backend needs python-xlib 0.15rc1 or higher\n")
self.keys_events = None
def init_notify(self):
""" Initialize desktop notifications """
if self.notify:
self.notify.close()
del self.notify
self.notify = None
if self.show_notify:
try:
from volti.notification import Notification
self.notify = Notification(self)
except Exception, err:
log.exception(str(err))
self.notify = None
def on_volume_changed(self, widget=None, data=None):
""" Callback for scale value_changed signal """
if self.lock:
return
if self.lockid:
GObject.source_remove(self.lockid)
self.lockid = None
self.lock = True
volume = int(self.scale.slider.get_value())
self.alsactrl.set_volume(volume)
vol = self.get_volume()
icon = get_icon_name(vol)
self.update_icon(vol, icon)
if self.show_tooltip:
self.update_tooltip(vol)
if self.key_press:
if self.show_notify and self.notify:
self.update_notify(vol, icon)
self.lockid = GObject.timeout_add(10, self._unlock)
def _unlock(self):
""" Unlock scale """
self.lock = False
self.lockid = None
self.key_press = False
return False
def on_button_press_event(self, widget, event, data=None):
""" Callback for button_press_event """
if event.button == 1:
self.scale.toggle_window()
elif event.button == 2:
if self.toggle == "mute":
self.change_volume("mute")
elif self.toggle == "mixer":
self.menu.toggle_mixer.set_active(
not self.menu.toggle_mixer.get_active())
def on_scroll_event(self, widget, event):
""" Callback for scroll_event """
if event.direction == Gdk.ScrollDirection.UP:
self.change_volume("up")
elif event.direction == Gdk.ScrollDirection.DOWN:
self.change_volume("down")
if self.show_notify and self.notify:
self.notify.close()
def on_popup_menu(self, status, button, time):
""" Show popup menu """
self.menu.toggle_mixer.handler_block(self.menu.mixer_handler_id)
self.menu.toggle_mixer.set_active(self.mixer_get_active())
self.menu.toggle_mixer.handler_unblock(self.menu.mixer_handler_id)
self.menu.toggle_mute.handler_block(self.menu.mute_handler_id)
self.menu.toggle_mute.set_active(self.alsactrl.is_muted())
self.menu.toggle_mute.handler_unblock(self.menu.mute_handler_id)
self.menu.popup_for_device(None, None, None,
Gtk.StatusIcon.position_menu, self, button, time)
def change_volume(self, event, key_press=False):
""" Change volume """
self.key_press = key_press
volume = self.scale.slider.get_value()
if event == "up":
volume = min(100, volume + self.scale_increment)
elif event == "down":
volume = max(0, volume - self.scale_increment)
if event == "mute":
self.menu.toggle_mute.set_active(
not self.menu.toggle_mute.get_active())
else:
self.menu.toggle_mute.set_active(False)
self.set_volume(volume)
def get_status_info(self, volume):
""" Returns status information """
var = "" if volume == _("Muted") else "%"
card_name = self.alsactrl.get_card_name()
mixer_name = self.alsactrl.get_mixer_name()
return var, card_name, mixer_name
def set_volume(self, volume):
""" Set volume """
if volume != self.scale.slider.get_value():
self.scale.slider.set_value(volume)
else:
self.scale.slider.emit("value_changed")
def get_volume(self):
""" Get volume """
if self.alsactrl.is_muted():
return _("Muted")
else:
return self.alsactrl.get_volume()
def update_icon(self, volume, icon):
""" Update icon """
if self.icon_theme != "Default":
icon = os.path.abspath(os.path.join(RES_DIR, "icons",
self.icon_theme, "32x32", icon+".png"))
self.set_from_file(icon)
else:
self.set_from_icon_name(icon)
def update_tooltip(self, volume):
""" Update tooltip """
var, card_name, mixer_name = self.get_status_info(volume)
tooltip = "<b>%s: %s%s </b>\n<small>%s: %s\n%s: %s</small>" % (
_("Output"), volume, var, _("Card"),
card_name, _("Mixer"), mixer_name)
self.set_tooltip_markup(tooltip)
def update_notify(self, volume, icon):
""" Update notification """
if self.icon_theme != "Default":
icon = os.path.abspath(os.path.join(RES_DIR, "icons",
self.icon_theme, "48x48", icon+".png"))
try:
self.notify.show(icon,
self.notify_body, self.notify_timeout, volume)
except DBusException:
del self.notify
self.notify = None
self.init_notify()
self.notify.show(icon,
self.notify_body, self.notify_timeout, volume)
def update(self, source=None, condition=None, reopen=True):
""" Update volume """
if self.lock:
return True
try:
if reopen:
del self.alsactrl
self.alsactrl = AlsaControl(
self.card_index, self.control, self)
volume = self.alsactrl.get_volume()
Gdk.threads_enter()
self.set_volume(volume)
Gdk.threads_leave()
return True
except Exception, err:
log.exception(str(err))
return False
def toggle_mute(self, widget=None):
""" Toggle mute status """
self.alsactrl.set_mute(widget.get_active())
volume = self.get_volume()
icon = get_icon_name(volume)
self.update_icon(volume, icon)
if self.show_tooltip:
self.update_tooltip(volume)
def toggle_mixer(self, widget=None):
""" Toggle mixer application """
mixer = "volti-mixer" if self.mixer_internal else self.mixer
if not mixer:
return
try:
pid = self.mixer_get_pid()
if pid:
os.kill(pid, SIGTERM)
else:
if self.run_in_terminal and not self.mixer_internal:
term = find_term()
cmd = [term, "-e", mixer]
else:
cmd = which(mixer)
Popen(cmd, shell=False)
except Exception, err:
log.debug(cmd)
log.exception(str(err))
def mixer_get_pid(self):
""" Get process id of mixer application """
mixer = "volti-mixer" if self.mixer_internal else self.mixer
pid = Popen(self.pid_app + " " + os.path.basename(mixer),
stdout=PIPE, shell=True).communicate()[0]
if pid:
try:
return int(pid)
except ValueError:
return None
return None
def mixer_get_active(self):
""" Returns status of mixer application """
if self.mixer_get_pid():
return True
return False
def main(self):
""" Main loop """
GObject.threads_init()
try:
Gtk.main()
except KeyboardInterrupt:
pass
def quit(self, widget=None):
""" Quit main loop """
Gtk.main_quit()
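# Minimal launcher sketch (assumption: the real entry point is volti's own
# startup script, which is not part of this module; this only illustrates how
# the tray object is driven).
if __name__ == "__main__":
    volti = VolumeTray()
    volti.main()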
| gpl-3.0 | -6,756,951,760,547,837,000 | 33.413598 | 86 | 0.57672 | false |
grnet/snf-image-creator | image_creator/output/composite.py | 2 | 3350 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module implements the CompositeOutput output class"""
from image_creator.output import Output
class CompositeOutput(Output, list):
"""This class can be used to composite different outputs into a single one
You may create an instance of this class and then add other output
instances to it. Executing a method on this instance will cause the
execution of the same method in each output instance that has been added to
this one.
"""
def __init__(self, outputs=None):
"""Add initial output instances"""
super(CompositeOutput, self).__init__()
if outputs is not None:
self.extend(outputs)
def error(self, msg):
"""Call the error method of each of the output instances"""
for out in self:
out.error(msg)
def warn(self, msg):
"""Call the warn method of each of the output instances"""
for out in self:
out.warn(msg)
def success(self, msg):
"""Call the success method of each of the output instances"""
for out in self:
out.success(msg)
def info(self, msg='', new_line=True):
"""Call the output method of each of the output instances"""
for out in self:
out.info(msg, new_line)
def result(self, msg=''):
"""Call the output method of each of the output instances"""
for out in self:
out.result(msg)
def cleanup(self):
"""Call the cleanup method of each of the output instances"""
for out in self:
out.cleanup()
def clear(self):
"""Call the clear method of each of the output instances"""
for out in self:
out.clear()
class _Progress(list):
"""Class used to composite different Progress objects"""
def __init__(self, size, title, bar_type='default'):
"""Create a progress on each of the added output instances"""
for out in self.parent: # pylint: disable=no-member
self.append(out.Progress(size, title, bar_type))
def goto(self, dest):
"""Call the goto method of each of the progress instances"""
for progress in self:
progress.goto(dest)
def next(self):
"""Call the next method of each of the progress instances"""
for progress in self:
progress.next()
def success(self, result):
"""Call the success method of each of the progress instances"""
for progress in self:
progress.success(result)
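# Illustrative sketch (not part of snf-image-creator): every method call on a
# CompositeOutput fans out to each registered output instance. PrintOutput is a
# hypothetical toy subclass used only for this demonstration.
if __name__ == "__main__":
    class PrintOutput(Output):
        """Toy output that just prints warnings to stdout."""
        def warn(self, msg):
            print("WARN: %s" % msg)
    combined = CompositeOutput([PrintOutput(), PrintOutput()])
    combined.warn("low disk space")  # printed once per registered output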
# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
| gpl-3.0 | -6,774,745,026,118,358,000 | 33.536082 | 79 | 0.62806 | false |
lferr/charm | charm/toolbox/PKEnc.py | 3 | 2819 | '''
Base class for public-key encryption
Notes: This class implements an interface for a standard public-key encryption scheme.
A public-key encryption scheme consists of four algorithms: (paramgen, keygen, encrypt, decrypt).
'''
from charm.toolbox.schemebase import *
encBaseSecDefs = Enum('OW_CPA','OW_CCA1','OW_CCA','IND_CPA','IND_CCA1','IND_CCA',
'NM_CPA','NM_CCA1','NM_CCA','KA_CPA','KA_CCA1','KA_CCA')
OW_CPA,OW_CCA1,OW_CCA="OW_CPA","OW_CCA1","OW_CCA"
IND_CPA,IND_CCA1,IND_CCA="IND_CPA","IND_CCA1","IND_CCA"
NM_CPA,NM_CCA1,NM_CCA="NM_CPA","NM_CCA1","NM_CCA"
KA_CPA,KA_CCA1,KA_CCA='KA_CPA','KA_CCA1','KA_CCA'
pkencSchemeType="pkeScheme"
class PKEnc(SchemeBase):
def __init__(self):
SchemeBase.__init__(self)
SchemeBase._setProperty(self, scheme='PKEnc')
def setProperty(self, secDef=None, assumption=None, messageSpace=None, secModel=None, **kwargs):
assert secDef is not None and secDef in encBaseSecDefs.getList(), "not a valid security definition for this scheme type."
SchemeBase._setProperty(self, None, encBaseSecDefs[secDef], str(assumption), messageSpace, str(secModel), **kwargs)
return True
def getProperty(self):
baseProp = SchemeBase._getProperty(self)
return baseProp
def checkProperty(self, schemeObj, _reqProps):
reqProps = [ (str(k), str(v)) for k,v in _reqProps ]
result = SchemeBase._checkProperty(self, schemeObj, reqProps)
return result
def updateProperty(self, scheme, secDef=None, assumption=None, messageSpace=None, secModel=None, **kwargs):
# 1. inherit the scheme's properties
assert hasattr(scheme, 'properties'), "schemeObj does not have getProperty() method."
self.properties.update(scheme.getProperty())
# 2. make sure things are consistent, then update to new properties
assert self.properties[schemeType] is not None, "scheme type wasn't specified on initialization"
assert secDef is not None and secDef in encBaseSecDefs.getList(), "not a valid security definition for this scheme type."
SchemeBase._setProperty(self, None, encBaseSecDefs[secDef], str(assumption), messageSpace, str(secModel), **kwargs)
return
def printProperties(self):
name = str(self.__class__).split("'")[-2].split(".")[-1]
print("<=== %s Properties ===>" % name)
for k,v in self.properties.items():
print(k, ":", v)
print("<=== %s Properties ===>" % name)
return
def paramgen(self, param1=None, param2=None):
return NotImplemented
def keygen(self, securityparam):
return NotImplemented
def encrypt(self, pk, M):
return NotImplemented
def decrypt(self, pk, sk, c):
return NotImplemented
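# Illustrative skeleton (not part of Charm): a concrete scheme subclasses PKEnc,
# implements the four algorithms, and advertises its properties via setProperty().
# The security definition, assumption and model below are placeholder values.
class _ExamplePKEnc(PKEnc):
    def __init__(self):
        PKEnc.__init__(self)
        PKEnc.setProperty(self, secDef='IND_CPA', assumption='DDH',
                          messageSpace=[str], secModel='SM')
    def keygen(self, securityparam):
        raise NotImplementedError("placeholder: generate (pk, sk) here")
    def encrypt(self, pk, M):
        raise NotImplementedError("placeholder: return ciphertext here")
    def decrypt(self, pk, sk, c):
        raise NotImplementedError("placeholder: return plaintext here")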
| lgpl-3.0 | -8,467,246,845,154,118,000 | 41.712121 | 129 | 0.656971 | false |
vegarang/devilry-django | devilry/restful/serializers.py | 1 | 4059 | from functools import wraps
import json
from django.db.models.query import ValuesQuerySet
from django.http import HttpResponseBadRequest, HttpResponse, HttpResponseForbidden
from devilry.defaults.encoding import CHARSET
class SerializableResult(object):
""" Stores Python objects for serialization with :class:`devilry.simplified.serializers.SerializerRegistry`. """
def __init__(self, result, httpresponsecls=HttpResponse, encoding=CHARSET):
self.result = result
self.httpresponsecls = httpresponsecls
self.encoding = encoding
class ErrorMsgSerializableResult(SerializableResult):
def __init__(self, errormessage, httpresponsecls):
super(ErrorMsgSerializableResult, self).__init__(dict(errormessages=[errormessage]),
httpresponsecls=httpresponsecls)
class ForbiddenSerializableResult(ErrorMsgSerializableResult):
def __init__(self, exception=None):
if exception and exception.message:
errormessage = exception.message
else:
errormessage = 'Forbidden'
super(ForbiddenSerializableResult, self).__init__(errormessage,
HttpResponseForbidden)
class InvalidUsernameSerializableResult(ErrorMsgSerializableResult):
def __init__(self, username):
super(InvalidUsernameSerializableResult, self).__init__('Invalid username: {0}'.format(username),
HttpResponseBadRequest)
class SerializerRegistryItem(object):
def __init__(self, serializer, deserializer):
self.serializer = serializer
self.deserializer = deserializer
class SerializerRegistry(dict):
def create_response(self, result, comformat, content_type=None):
i = self[comformat]
content_type = content_type or comformat
return result.httpresponsecls(i.serializer(result.result),
content_type='{0}; encoding={1}'.format(content_type, result.encoding))
def deserialize(self, comformat, data):
i = self[comformat]
return i.deserializer(data)
def json_serialize_handler(obj):
#print type(obj)
if isinstance(obj, ValuesQuerySet):
return list(obj)
if hasattr(obj, 'isoformat'):
#return obj.strftime('%Y-%m-%d')
return obj.strftime('%Y-%m-%dT%H:%M:%S')
else:
raise TypeError('Object of type %s with value of %s is not JSON serializable' % (
type(obj), repr(obj)))
def json_serialize(s):
return json.dumps(s, default=json_serialize_handler, indent=2)
serializers = SerializerRegistry()
serializers['application/json'] = SerializerRegistryItem(json_serialize, json.loads)
def _serialize(content_type_override=None):
def decorator(f):
@wraps(f)
def wrapper(self, request, *args, **kwargs):
comformat = request.META.get('Accept', 'application/json')
if not comformat in serializers:
return HttpResponseBadRequest(
"Bad request: %s" % comformat,
                    content_type='text/plain; encoding={0}'.format(CHARSET))
self.comformat = comformat
result = f(self, request, *args, **kwargs) # returns a SerializableResult object
return serializers.create_response(result, comformat, content_type_override)
return wrapper
return decorator
def serialize(f=None, content_type_override=None):
""" Decorator to serialize response.
:param content_type_override: Override content type of response.
Serialization is still done using the the communication format from the
Accept header, however the content-type header will use this format instead
of the communication format. Mainly useful when browsers need text/html
response to behave, such as with file upload.
"""
decorator = _serialize(content_type_override=content_type_override)
if f:
return decorator(f)
else:
return decorator
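# Illustrative sketch (not part of devilry): a restful view method is decorated
# with @serialize and returns a SerializableResult, which the decorator encodes
# according to the request's Accept header. This example class is hypothetical.
class _ExampleRestView(object):
    @serialize
    def get(self, request, pk):
        return SerializableResult({'id': pk, 'status': 'ok'})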
| bsd-3-clause | 1,314,081,677,700,295,700 | 40 | 116 | 0.664449 | false |
gurneyalex/odoo | addons/delivery/tests/test_delivery_stock_move.py | 3 | 3418 | # -*- coding: utf-8 -*-
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.tests import tagged, Form
@tagged('post_install', '-at_install')
class StockMoveInvoice(AccountingTestCase):
def setUp(self):
super(StockMoveInvoice, self).setUp()
self.ProductProduct = self.env['product.product']
self.SaleOrder = self.env['sale.order']
self.AccountJournal = self.env['account.journal']
self.partner_18 = self.env.ref('base.res_partner_18')
self.pricelist_id = self.env.ref('product.list0')
self.product_11 = self.env.ref('product.product_product_11')
self.product_cable_management_box = self.env.ref('stock.product_cable_management_box')
self.product_uom_unit = self.env.ref('uom.product_uom_unit')
self.normal_delivery = self.env.ref('delivery.normal_delivery_carrier')
def test_01_delivery_stock_move(self):
        # Test that the stored fields of stock moves are computed correctly in the invoice-before-delivery flow
self.product_11.write({
'weight': 0.25,
})
self.sale_prepaid = self.SaleOrder.create({
'partner_id': self.partner_18.id,
'partner_invoice_id': self.partner_18.id,
'partner_shipping_id': self.partner_18.id,
'pricelist_id': self.pricelist_id.id,
'order_line': [(0, 0, {
'name': 'Cable Management Box',
'product_id': self.product_cable_management_box.id,
'product_uom_qty': 2,
'product_uom': self.product_uom_unit.id,
'price_unit': 750.00,
})],
})
# I add delivery cost in Sales order
delivery_wizard = Form(self.env['choose.delivery.carrier'].with_context({
'default_order_id': self.sale_prepaid.id,
'default_carrier_id': self.normal_delivery.id,
}))
choose_delivery_carrier = delivery_wizard.save()
choose_delivery_carrier.button_confirm()
# I confirm the SO.
self.sale_prepaid.action_confirm()
self.sale_prepaid._create_invoices()
# I check that the invoice was created
self.assertEqual(len(self.sale_prepaid.invoice_ids), 1, "Invoice not created.")
# I confirm the invoice
self.invoice = self.sale_prepaid.invoice_ids
self.invoice.post()
# I pay the invoice.
self.journal = self.AccountJournal.search([('type', '=', 'cash'), ('company_id', '=', self.sale_prepaid.company_id.id)], limit=1)
register_payments = self.env['account.payment.register'].with_context(active_ids=self.invoice.ids).create({
'journal_id': self.journal.id,
})
register_payments.create_payments()
# Check the SO after paying the invoice
self.assertNotEqual(self.sale_prepaid.invoice_count, 0, 'order not invoiced')
self.assertTrue(self.sale_prepaid.invoice_status == 'invoiced', 'order is not invoiced')
self.assertEqual(len(self.sale_prepaid.picking_ids), 1, 'pickings not generated')
# Check the stock moves
moves = self.sale_prepaid.picking_ids.move_lines
self.assertEqual(moves[0].product_qty, 2, 'wrong product_qty')
self.assertEqual(moves[0].weight, 2.0, 'wrong move weight')
# Ship
self.picking = self.sale_prepaid.picking_ids.action_done()
| agpl-3.0 | 933,671,224,996,467,800 | 40.682927 | 137 | 0.625219 | false |
Landric/PANFeed | panfeed_site/wsgi.py | 1 | 1432 | """
WSGI config for panfeed_site project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "punfeed.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "panfeed_site.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| gpl-3.0 | 8,266,313,809,794,317,000 | 43.75 | 79 | 0.793296 | false |
wso2-incubator/iot-server-appliances | Arduino Robot/PC_Clients/PythonTestClients/PublishDirectToBAM/BAMPythonPublisher/PythonClient.py | 15 | 1100 | #!/usr/bin/env python
from Publisher import *
ip = '10.100.7.38' # IP address of the server
port = 7713 # Thrift listen port of the server
username = 'admin' # username
password = 'admin' # password
publisher = Publisher()
# Initialize publisher with ip and port of server
publisher.init(ip, port)
# Connect to server with username and password
publisher.connect(username, password)
# Define stream definition
streamDefinition = "{ 'name':'org.wso2.iot.statistics.device.pin.data', 'version':'1.0.0', 'nickName': 'IoT Connected Device Pin Data', 'description': 'Pin Data Received', 'tags': ['arduino', 'led13'], 'metaData':[ {'name':'ipAdd','type':'STRING'},{'name':'deviceType','type':'STRING'},{'name':'owner','type':'STRING'}, {'name':'time','type':'STRING'}], 'payloadData':[ {'name':'macAddress','type':'STRING'}, {'name':'pin','type':'STRING'}, {'name':'pinValue','type':'STRING'}, {'name':'description','type':'STRING'}] }";
publisher.defineStream(streamDefinition)
# Publish sample message
publisher.publish("Test message form python client")
# Disconnect
publisher.disconnect()
| apache-2.0 | 6,848,327,696,808,988,000 | 38.285714 | 521 | 0.694545 | false |
ptisserand/ansible | lib/ansible/modules/system/filesystem.py | 16 | 11356 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Alexander Bulimov <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- Alexander Bulimov (@abulimov)
module: filesystem
short_description: Makes a filesystem
description:
- This module creates a filesystem.
version_added: "1.2"
options:
fstype:
choices: [ btrfs, ext2, ext3, ext4, ext4dev, lvm, reiserfs, xfs, vfat ]
description:
- Filesystem type to be created.
- reiserfs support was added in 2.2.
- lvm support was added in 2.5.
- since 2.5, I(dev) can be an image file.
- vfat support was added in 2.5
required: yes
aliases: [type]
dev:
description:
- Target path to device or image file.
required: yes
aliases: [device]
force:
description:
    - If C(yes), allows creating a new filesystem on a device that already has a filesystem.
type: bool
default: 'no'
resizefs:
description:
    - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space.
    - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), C(xfs) and C(vfat) filesystems.
    - XFS will only grow if mounted.
    - vFAT resizing will likely fail if fatresize < 1.04.
type: bool
default: 'no'
version_added: "2.0"
opts:
description:
- List of options to be passed to mkfs command.
requirements:
- Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too.
notes:
- Potential filesystem on I(dev) are checked using C(blkid), in case C(blkid) isn't able to detect an existing filesystem,
this filesystem is overwritten even if I(force) is C(no).
'''
EXAMPLES = '''
- name: Create a ext2 filesystem on /dev/sdb1
filesystem:
fstype: ext2
dev: /dev/sdb1
- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
filesystem:
fstype: ext4
dev: /dev/sdb1
opts: -cc
'''
from distutils.version import LooseVersion
import os
import re
import stat
from ansible.module_utils.basic import AnsibleModule, get_platform
class Device(object):
def __init__(self, module, path):
self.module = module
self.path = path
def size(self):
""" Return size in bytes of device. Returns int """
statinfo = os.stat(self.path)
if stat.S_ISBLK(statinfo.st_mode):
blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
_, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
return int(devsize_in_bytes)
elif os.path.isfile(self.path):
return os.path.getsize(self.path)
else:
self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
def __str__(self):
return self.path
class Filesystem(object):
GROW = None
MKFS = None
MKFS_FORCE_FLAGS = ''
def __init__(self, module):
self.module = module
@property
def fstype(self):
return type(self).__name__
def get_fs_size(self, dev):
""" Return size in bytes of filesystem on device. Returns int """
raise NotImplementedError()
def create(self, opts, dev):
if self.module.check_mode:
return
mkfs = self.module.get_bin_path(self.MKFS, required=True)
if opts is None:
cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev)
else:
cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev)
self.module.run_command(cmd, check_rc=True)
def grow_cmd(self, dev):
cmd = self.module.get_bin_path(self.GROW, required=True)
return [cmd, str(dev)]
def grow(self, dev):
"""Get dev and fs size and compare. Returns stdout of used command."""
devsize_in_bytes = dev.size()
try:
fssize_in_bytes = self.get_fs_size(dev)
except NotImplementedError:
self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype)
if not fssize_in_bytes < devsize_in_bytes:
self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
elif self.module.check_mode:
self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev))
else:
_, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True)
return out
class Ext(Filesystem):
MKFS_FORCE_FLAGS = '-F'
GROW = 'resize2fs'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path('tune2fs', required=True)
# Get Block count and Block size
_, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True)
for line in size.splitlines():
if 'Block count:' in line:
block_count = int(line.split(':')[1].strip())
elif 'Block size:' in line:
block_size = int(line.split(':')[1].strip())
return block_size * block_count
class Ext2(Ext):
MKFS = 'mkfs.ext2'
class Ext3(Ext):
MKFS = 'mkfs.ext3'
class Ext4(Ext):
MKFS = 'mkfs.ext4'
class XFS(Filesystem):
MKFS = 'mkfs.xfs'
MKFS_FORCE_FLAGS = '-f'
GROW = 'xfs_growfs'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path('xfs_growfs', required=True)
_, size, _ = self.module.run_command([cmd, '-n', str(dev)], check_rc=True)
for line in size.splitlines():
col = line.split('=')
if col[0].strip() == 'data':
if col[1].strip() != 'bsize':
self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "bsize")')
if col[2].split()[1] != 'blocks':
self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "blocks")')
block_size = int(col[2].split()[0])
block_count = int(col[3].split(',')[0])
return block_size * block_count
class Reiserfs(Filesystem):
MKFS = 'mkfs.reiserfs'
MKFS_FORCE_FLAGS = '-f'
class Btrfs(Filesystem):
MKFS = 'mkfs.btrfs'
def __init__(self, module):
super(Btrfs, self).__init__(module)
_, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True)
match = re.search(r" v([0-9.]+)", stdout)
if not match:
# v0.20-rc1 use stderr
match = re.search(r" v([0-9.]+)", stderr)
if match:
# v0.20-rc1 doesn't have --force parameter added in following version v3.12
if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
self.MKFS_FORCE_FLAGS = '-f'
else:
self.MKFS_FORCE_FLAGS = ''
else:
# assume version is greater or equal to 3.12
self.MKFS_FORCE_FLAGS = '-f'
self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
class VFAT(Filesystem):
if get_platform() == 'FreeBSD':
MKFS = "newfs_msdos"
else:
MKFS = 'mkfs.vfat'
GROW = 'fatresize'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path(self.GROW, required=True)
_, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True)
for line in output.splitlines()[1:]:
param, value = line.split(':', 1)
if param.strip() == 'Size':
return int(value.strip())
self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)
def grow_cmd(self, dev):
cmd = self.module.get_bin_path(self.GROW)
return [cmd, "-s", str(dev.size()), str(dev.path)]
class LVM(Filesystem):
MKFS = 'pvcreate'
MKFS_FORCE_FLAGS = '-f'
GROW = 'pvresize'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path('pvs', required=True)
_, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', str(dev)], check_rc=True)
block_count = int(size[:-1]) # block size is 1
return block_count
FILESYSTEMS = {
'ext2': Ext2,
'ext3': Ext3,
'ext4': Ext4,
'ext4dev': Ext4,
'reiserfs': Reiserfs,
'xfs': XFS,
'btrfs': Btrfs,
'vfat': VFAT,
'LVM2_member': LVM,
}
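# Adding support for another filesystem type is a matter of subclassing
# Filesystem and registering the class in FILESYSTEMS above. A hypothetical
# sketch (not part of this module; f2fs is not actually supported here):
#
#     class F2FS(Filesystem):
#         MKFS = 'mkfs.f2fs'
#         MKFS_FORCE_FLAGS = '-f'
#
#     FILESYSTEMS['f2fs'] = F2FS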
def main():
friendly_names = {
'lvm': 'LVM2_member',
}
fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
# There is no "single command" to manipulate filesystems, so we map them all out and their options
module = AnsibleModule(
argument_spec=dict(
fstype=dict(required=True, aliases=['type'],
choices=list(fstypes)),
dev=dict(required=True, aliases=['device']),
opts=dict(),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
),
supports_check_mode=True,
)
dev = module.params['dev']
fstype = module.params['fstype']
opts = module.params['opts']
force = module.params['force']
resizefs = module.params['resizefs']
if fstype in friendly_names:
fstype = friendly_names[fstype]
changed = False
try:
klass = FILESYSTEMS[fstype]
except KeyError:
module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
if not os.path.exists(dev):
module.fail_json(msg="Device %s not found." % dev)
dev = Device(module, dev)
cmd = module.get_bin_path('blkid', required=True)
rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
# In case blkid isn't able to identify an existing filesystem, device is considered as empty,
# then this existing filesystem would be overwritten even if force isn't enabled.
fs = raw_fs.strip()
filesystem = klass(module)
same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
if same_fs and not resizefs and not force:
module.exit_json(changed=False)
elif same_fs and resizefs:
if not filesystem.GROW:
module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
out = filesystem.grow(dev)
# Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on.
# in the future, you would have to parse the output to determine this.
# thankfully, these are safe operations if no change is made.
module.exit_json(changed=True, msg=out)
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
# create fs
filesystem.create(opts, dev)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,976,133,672,410,698,000 | 31.726225 | 137 | 0.599859 | false |
kynikos/wiki-scripts | list-problematic-pages.py | 1 | 6013 | #! /usr/bin/env python3
# TODO: create an on-wiki report using AutoPage
from ws.client import API
from ws.db.database import Database
from ws.parser_helpers.encodings import dotencode
import ws.ArchWiki.lang as lang
def valid_sectionname(db, title):
"""
Checks if the ``sectionname`` property of given title is valid, i.e. if a
corresponding section exists on a page with given title.
.. note::
Validation is limited to pages in the Main namespace for easier access
to the cache; anchors on other pages are considered to be always valid.
:param ws.db.database.Database db: database object
:param title: parsed title of the wikilink to be checked
:type title: ws.parser_helpers.title.Title
:returns: ``True`` if the anchor corresponds to an existing section
"""
# we can't check interwiki links
if title.iwprefix:
return True
# empty sectionname is always valid
if title.sectionname == "":
return True
# get list of valid anchors
result = db.query(titles=title.fullpagename, prop="sections", secprop={"anchor"})
page = next(result)
anchors = [section["anchor"] for section in page.get("sections", [])]
# encode the given anchor and validate
return dotencode(title.sectionname) in anchors
def list_redirects_broken_fragments(api, db):
db.sync_with_api(api)
db.sync_revisions_content(api, mode="latest")
db.update_parser_cache()
# limit to redirects pointing to the content namespaces
redirects = api.redirects.fetch(target_namespaces=[0, 4, 12])
for source in sorted(redirects.keys()):
target = redirects[source]
title = api.Title(target)
# limit to redirects with broken fragment
if valid_sectionname(db, title):
continue
print("* [[{}]] → [[{}]]".format(source, target))
def list_redirects_wrong_capitalization(api):
# limit to redirects pointing to the main namespace, others deserve special treatment
redirects = api.redirects.fetch(source_namespaces=[0, 4, 12], target_namespaces=[0])
# we will count the number of uppercase letters starting each word
def count_uppercase(text):
words = text.split()
firstletters = [word[0] for word in words]
return sum(1 for c in firstletters if c.isupper())
for source in sorted(redirects.keys()):
target = redirects[source].split("#", maxsplit=1)[0]
# limit to redirects whose source and target title differ only in capitalization
if source.lower() != target.lower():
continue
# limit to multiple-word titles
pure, _ = lang.detect_language(source)
if len(pure.split()) == 1:
continue
# limit to sentence-case titles redirecting to title-case
if count_uppercase(source) >= count_uppercase(target):
continue
print("* [[{}]] → [[{}]]".format(source, target))
def list_talkpages_of_deleted_pages(api):
# get titles of all pages in 'Main', 'ArchWiki' and 'Help' namespaces
allpages = []
for ns in ["0", "4", "12"]:
_pages = api.generator(generator="allpages", gaplimit="max", gapnamespace=ns)
allpages.extend([page["title"] for page in _pages])
# get titles of all redirect pages in 'Talk', 'ArchWiki talk' and 'Help talk' namespaces
talks = []
for ns in ["1", "5", "13"]:
pages = api.generator(generator="allpages", gaplimit="max", gapnamespace=ns)
talks.extend([page["title"] for page in pages])
# print talk pages of deleted pages
for title in sorted(talks):
_title = api.Title(title)
if _title.articlepagename not in allpages:
print("* [[%s]]" % title)
def list_talkpages_of_redirects(api):
# get titles of all redirect pages in 'Main', 'ArchWiki' and 'Help' namespaces
redirect_titles = []
for ns in ["0", "4", "12"]:
_pages = api.generator(generator="allpages", gaplimit="max", gapfilterredir="redirects", gapnamespace=ns)
redirect_titles.extend([page["title"] for page in _pages])
# get titles of all pages in 'Talk', 'ArchWiki talk' and 'Help talk' namespaces
talks = []
for ns in ["1", "5", "13"]:
# limiting to talk pages that are not redirects is also useful
# pages = api.generator(generator="allpages", gaplimit="max", gapnamespace=ns)
pages = api.generator(generator="allpages", gaplimit="max", gapfilterredir="nonredirects", gapnamespace=ns)
talks.extend([page["title"] for page in pages])
# print talk pages associated to a redirect page
for title in sorted(redirect_titles):
_title = api.Title(title)
if _title.talkpagename in talks:
print("* [[%s]]" % _title.talkpagename)
if __name__ == "__main__":
import ws.config
import ws.logging
    argparser = ws.config.getArgParser(description="List problematic pages (broken redirect fragments, wrong capitalization, orphaned talk pages)")
API.set_argparser(argparser)
Database.set_argparser(argparser)
args = argparser.parse_args()
# set up logging
ws.logging.init(args)
api = API.from_argparser(args)
db = Database.from_argparser(args)
print("== Redirects with broken fragments ==")
list_redirects_broken_fragments(api, db)
print()
print("== Redirects with potentially wrong capitalization ==")
print("""\
According to ArchWiki standards, the title must be sentence-case (if it is not
an acronym). We will print the wrongly capitalized redirects, i.e. cases where
a sentence-case title redirects to a title-case one.
""")
list_redirects_wrong_capitalization(api)
print()
print("== Talk pages of deleted pages ==")
print("The following talk pages correspond to deleted pages and should not exist.")
list_talkpages_of_deleted_pages(api)
print()
print("== Talk pages of redirects ==")
print("The following talk pages correspond to redirect pages and should be redirected as well or deleted.")
list_talkpages_of_redirects(api)
| gpl-3.0 | -8,877,456,038,966,192,000 | 36.092593 | 115 | 0.661674 | false |
cyberark-bizdev/ansible | lib/ansible/modules/network/aci/aci_switch_policy_vpc_protection_group.py | 1 | 8260 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_switch_policy_vpc_protection_group
short_description: Create switch policy Explicit vPC Protection Group on Cisco ACI fabrics (fabric:ExplicitGEp, fabric:NodePEp).
description:
- Create switch policy Explicit vPC Protection Group on Cisco ACI fabrics.
notes:
- More information from the internal APIC class
I(fabric:ExplicitGEp) and I(fabric:NodePEp) at U(https://developer.cisco.com/site/aci/docs/apis/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
protection_group:
description:
- The name of the Explicit vPC Protection Group.
aliases: [ name, protection_group_name ]
required: yes
protection_group_id:
description:
- The Explicit vPC Protection Group ID.
aliases: [ id ]
required: yes
vpc_domain_policy:
description:
- The vPC domain policy to be associated with the Explicit vPC Protection Group.
aliases: [ vpc_domain_policy_name ]
switch_1_id:
description:
- The ID of the first Leaf Switch for the Explicit vPC Protection Group.
required: yes
switch_2_id:
description:
- The ID of the Second Leaf Switch for the Explicit vPC Protection Group.
required: yes
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add Explicit vPC Protection Group
aci_switch_policy_vpc_protection_group:
host: apic
username: admin
password: SomeSecretPassword
protection_group: protectiongroupname
protection_group_id: 6
vpc_domain_policy: vpcdomainpolicyname
switch_1_id: 3811
switch_2_id: 3812
state: present
- name: Remove Explicit vPC Protection Group
aci_switch_policy_vpc_protection_group:
host: apic
username: admin
password: SomeSecretPassword
protection_group: protectiongroupname
state: absent
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
protection_group=dict(type='str', aliases=['name', 'protection_group_name']), # Not required for querying all objects
protection_group_id=dict(type='int', aliases=['id']),
vpc_domain_policy=dict(type='str', aliases=['vpc_domain_policy_name']),
switch_1_id=dict(type='int'),
switch_2_id=dict(type='int'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['protection_group']],
['state', 'present', ['protection_group', 'protection_group_id', 'switch_1_id', 'switch_2_id']],
],
)
protection_group = module.params['protection_group']
protection_group_id = module.params['protection_group_id']
vpc_domain_policy = module.params['vpc_domain_policy']
switch_1_id = module.params['switch_1_id']
switch_2_id = module.params['switch_2_id']
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fabricExplicitGEp',
aci_rn='fabric/protpol/expgep-{0}'.format(protection_group),
filter_target='eq(fabricExplicitGEp.name, "{0}")'.format(protection_group),
module_object=protection_group,
),
        child_classes=['fabricNodePEp', 'fabricRsVpcInstPol'],
)
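    # Illustrative note (assumption, not part of the original module): with the
    # root_class above, construct_url() builds an APIC MO URL roughly of the form
    #   https://<host>/api/mo/uni/fabric/protpol/expgep-<protection_group>.json
    # and, when querying, a filter string derived from the filter_target expression.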
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fabricExplicitGEp',
class_config=dict(
name=protection_group,
id=protection_group_id,
rn='expgep-{0}'.format(protection_group),
),
child_configs=[
dict(
fabricNodePEp=dict(
attributes=dict(
id='{0}'.format(switch_1_id),
rn='nodepep-{0}'.format(switch_1_id),
),
),
),
dict(
fabricNodePEp=dict(
attributes=dict(
id='{0}'.format(switch_2_id),
rn='nodepep-{0}'.format(switch_2_id),
),
),
),
dict(
fabricRsVpcInstPol=dict(
attributes=dict(
tnVpcInstPolName=vpc_domain_policy,
),
),
),
],
)
aci.get_diff(aci_class='fabricExplicitGEp')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | -6,030,748,732,098,617,000 | 29.036364 | 141 | 0.581598 | false |
HyperBaton/ansible | lib/ansible/modules/network/fortios/fortios_firewall_schedule_recurring.py | 7 | 11614 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_schedule_recurring
short_description: Recurring schedule configuration in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the firewall_schedule feature and recurring category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
              This attribute was present already in previous versions at a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_schedule_recurring:
description:
- Recurring schedule configuration.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
color:
description:
- Color of icon on the GUI.
type: int
day:
description:
- One or more days of the week on which the schedule is valid. Separate the names of the days with a space.
type: str
choices:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- none
end:
description:
- "Time of day to end the schedule, format hh:mm."
type: str
name:
description:
- Recurring schedule name.
required: true
type: str
start:
description:
- "Time of day to start the schedule, format hh:mm."
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Recurring schedule configuration.
fortios_firewall_schedule_recurring:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_schedule_recurring:
color: "3"
day: "sunday"
end: "<your_own_value>"
name: "default_name_6"
start: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_schedule_recurring_data(json):
option_list = ['color', 'day', 'end',
'name', 'start']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
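# Illustrative example (hypothetical values, not part of the original module):
#   filter_firewall_schedule_recurring_data({'name': 'lunch', 'start': '12:00',
#                                            'end': None, 'state': 'present'})
# keeps only the documented recurring-schedule attributes that are actually set,
# i.e. it returns {'name': 'lunch', 'start': '12:00'}.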
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
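# Illustrative example (hypothetical data, not part of the original module):
# underscore_to_hyphen([{'some_key': 1}]) returns [{'some-key': 1}], recursively
# rewriting dict keys so Ansible-style names match the hyphenated FortiOS CMDB keys.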
def firewall_schedule_recurring(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_schedule_recurring'] and data['firewall_schedule_recurring']:
state = data['firewall_schedule_recurring']['state']
else:
state = True
firewall_schedule_recurring_data = data['firewall_schedule_recurring']
filtered_data = underscore_to_hyphen(filter_firewall_schedule_recurring_data(firewall_schedule_recurring_data))
if state == "present":
return fos.set('firewall.schedule',
'recurring',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall.schedule',
'recurring',
mkey=filtered_data['name'],
vdom=vdom)
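# Note (assumption, for illustration only): with the fortiosapi backend the calls
# above map onto the FortiOS REST CMDB endpoint for this table, roughly
#   set    -> PUT/POST /api/v2/cmdb/firewall.schedule/recurring[/<name>]
#   delete -> DELETE   /api/v2/cmdb/firewall.schedule/recurring/<name>
# scoped to the selected vdom.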
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_schedule(data, fos):
if data['firewall_schedule_recurring']:
resp = firewall_schedule_recurring(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_schedule_recurring": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"day": {"required": False, "type": "str",
"choices": ["sunday", "monday", "tuesday",
"wednesday", "thursday", "friday",
"saturday", "none"]},
"end": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"start": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall_schedule(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall_schedule(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,218,717,658,303,861,000 | 30.559783 | 127 | 0.579645 | false |
jonnatas/codeschool | src/djcomponents/jquery/widgets.py | 2 | 1462 | from djcomponents import Widget
class Accordion(Widget):
"""
jQuery UI accordion element.
An accordion element that shows each sub-element at a time.
"""
class Autocomplete(Widget):
"""
jQuery UI autocomplete element.
Autocomplete text as you type.
"""
class Button(Widget):
"""
jQuery UI button element.
Enhanced button and anchor types.
"""
class DatePicker(Widget):
"""
jQuery UI datepicker element.
A date input element that displays a calendar.
"""
class Dialog(Widget):
"""
jQuery UI dialog element.
Simple <div>-based dialog support.
"""
class Menu(Widget):
"""
jQuery UI menu element.
A menu of options.
"""
class ProgressBar(Widget):
"""
jQuery UI progressbar element.
Show progress of an action.
"""
class SelectMenu(Widget):
"""
jQuery UI selectmenu element.
Enchanced version of a HTML <select> element.
"""
class Slider(Widget):
"""
jQuery UI slider element.
Graphical representation of a numerical value in a range.
"""
class Spinner(Widget):
"""
jQuery UI spinner element.
    Enhanced numeric input for stepping through values with up/down buttons.
"""
class Tabs(Widget):
"""
jQuery UI tabs element.
Organize content inside tabs.
"""
class Tooltip(Widget):
"""
jQuery UI tooltip element.
Tips and extra information to the user.
"""
| gpl-3.0 | -2,558,969,589,898,031,600 | 14.072165 | 65 | 0.627907 | false |
uffejakobsen/libsigrokdecode | decoders/flexray/pd.py | 4 | 16182 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2019 Stephan Thiele <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
# Selection of constants as defined in FlexRay specification 3.0.1 Chapter A.1:
class Const:
cChannelIdleDelimiter = 11
cCrcInitA = 0xFEDCBA
cCrcInitB = 0xABCDEF
cCrcPolynomial = 0x5D6DCB
cCrcSize = 24
cCycleCountMax = 63
cdBSS = 2
cdCAS = 30
cdFES = 2
cdFSS = 1
cHCrcInit = 0x01A
cHCrcPolynomial = 0x385
cHCrcSize = 11
cSamplesPerBit = 8
cSlotIDMax = 2047
cStaticSlotIDMax = 1023
cVotingSamples = 5
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'flexray'
name = 'FlexRay'
longname = 'FlexRay'
desc = 'Automotive network communications protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['Automotive']
channels = (
{'id': 'channel', 'name': 'Channel', 'desc': 'FlexRay bus channel'},
)
options = (
{'id': 'channel_type', 'desc': 'Channel type', 'default': 'A',
'values': ('A', 'B')},
{'id': 'bitrate', 'desc': 'Bitrate (bit/s)', 'default': 10000000,
'values': (10000000, 5000000, 2500000)},
)
annotations = (
('data', 'FlexRay payload data'),
('tss', 'Transmission start sequence'),
('fss', 'Frame start sequence'),
('reserved-bit', 'Reserved bit'),
('ppi', 'Payload preamble indicator'),
('null-frame', 'Nullframe indicator'),
('sync-frame', 'Full identifier'),
('startup-frame', 'Startup frame indicator'),
('id', 'Frame ID'),
('length', 'Data length'),
('header-crc', 'Header CRC'),
('cycle', 'Cycle code'),
('data-byte', 'Data byte'),
('frame-crc', 'Frame CRC'),
('fes', 'Frame end sequence'),
('bss', 'Byte start sequence'),
('warning', 'Warning'),
('bit', 'Bit'),
('cid', 'Channel idle delimiter'),
('dts', 'Dynamic trailing sequence'),
('cas', 'Collision avoidance symbol'),
)
annotation_rows = (
('bits', 'Bits', (15, 17)),
('fields', 'Fields', tuple(range(15)) + (18, 19, 20)),
('warnings', 'Warnings', (16,)),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.reset_variables()
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
bitrate = float(self.options['bitrate'])
self.samplerate = value
self.bit_width = float(self.samplerate) / bitrate
self.sample_point = (self.bit_width / 100.0) * self.sample_point_percent
# Generic helper for FlexRay bit annotations.
def putg(self, ss, es, data):
left, right = int(self.sample_point), int(self.bit_width - self.sample_point)
self.put(ss - left, es + right, self.out_ann, data)
# Single-FlexRay-bit annotation using the current samplenum.
def putx(self, data):
self.putg(self.samplenum, self.samplenum, data)
# Multi-FlexRay-bit annotation from self.ss_block to current samplenum.
def putb(self, data):
self.putg(self.ss_block, self.samplenum, data)
# Generic CRC algorithm for any bit size and any data length. Used for
# 11-bit header and 24-bit trailer. Not very efficient but at least it
# works for now.
#
# TODO:
# - use precalculated tables to increase performance.
# - Add support for reverse CRC calculations.
@staticmethod
def crc(data, data_len_bits, polynom, crc_len_bits, iv=0, xor=0):
reg = iv ^ xor
for i in range(data_len_bits - 1, -1, -1):
bit = ((reg >> (crc_len_bits - 1)) & 0x1) ^ ((data >> i) & 0x1)
reg <<= 1
if bit:
reg ^= polynom
mask = (1 << crc_len_bits) - 1
crc = reg & mask
return crc ^ xor
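    # Illustrative example (not part of the original decoder): the 11-bit header
    # CRC below is computed over the 20 header bits (indicators, ID and length) as
    #   Decoder.crc(header_bits, 20, Const.cHCrcPolynomial,
    #               Const.cHCrcSize, Const.cHCrcInit)
    # while the 24-bit frame CRC uses cCrcPolynomial/cCrcSize with cCrcInitA or
    # cCrcInitB depending on the configured channel.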
def reset_variables(self):
self.sample_point_percent = 50 # TODO: use vote based sampling
self.state = 'IDLE'
self.tss_start = self.tss_end = self.frame_type = self.dlc = None
self.rawbits = [] # All bits, including byte start sequence bits
self.bits = [] # Only actual FlexRay frame bits (no byte start sequence bits)
self.curbit = 0 # Current bit of FlexRay frame (bit 0 == FSS)
self.last_databit = 999 # Positive value that bitnum+x will never match
self.last_xmit_bit = 999 # Positive value that bitnum+x will never match
self.ss_block = None
self.ss_databytebits = []
self.end_of_frame = False
self.dynamic_frame = False
self.ss_bit0 = None
self.ss_bit1 = None
self.ss_bit2 = None
# Poor man's clock synchronization. Use signal edges which change to
# dominant state in rather simple ways. This naive approach is neither
# aware of the SYNC phase's width nor the specific location of the edge,
# but improves the decoder's reliability when the input signal's bitrate
# does not exactly match the nominal rate.
def dom_edge_seen(self, force=False):
self.dom_edge_snum = self.samplenum
self.dom_edge_bcount = self.curbit
# Determine the position of the next desired bit's sample point.
def get_sample_point(self, bitnum):
samplenum = self.dom_edge_snum
samplenum += self.bit_width * (bitnum - self.dom_edge_bcount)
samplenum += self.sample_point
return int(samplenum)
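    # Worked example (illustrative): at 10 Mbit/s sampled at 100 MHz, bit_width is
    # 100e6 / 10e6 = 10 samples and sample_point is 10 * 50 / 100 = 5 samples, so
    # each bit is sampled 5 samples after its nominal start, measured from the
    # last dominant edge used for resynchronization.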
def is_bss_sequence(self):
        # FlexRay uses NRZ encoding and adds a binary '10' byte start sequence
        # (BSS) before each byte, i.e. after every 8 data bits another BSS
        # follows, except after the frame CRC.
if self.end_of_frame:
return False
if (len(self.rawbits) - 2) % 10 == 0:
return True
elif (len(self.rawbits) - 3) % 10 == 0:
return True
return False
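    # Illustrative note (not part of the original decoder): rawbits[0] is the FSS
    # and every byte is preceded by a two-bit BSS ('1', '0'), so BSS bits sit at
    # raw indices (1, 2), (11, 12), (21, 22), ... -- exactly the positions the two
    # modulo-10 checks above match, except after the frame CRC (end_of_frame).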
def handle_bit(self, fr_rx):
self.rawbits.append(fr_rx)
self.bits.append(fr_rx)
# Get the index of the current FlexRay frame bit.
bitnum = len(self.bits) - 1
# If this is a byte start sequence remove it from self.bits and ignore it.
if self.is_bss_sequence():
self.bits.pop()
if bitnum > 1:
self.putx([15, [str(fr_rx)]])
else:
if len(self.rawbits) == 2:
self.ss_bit1 = self.samplenum
elif len(self.rawbits) == 3:
self.ss_bit2 = self.samplenum
self.curbit += 1 # Increase self.curbit (bitnum is not affected).
return
else:
if bitnum > 1:
self.putx([17, [str(fr_rx)]])
# Bit 0: Frame start sequence (FSS) bit
if bitnum == 0:
self.ss_bit0 = self.samplenum
# Bit 1: Start of header
elif bitnum == 1:
if self.rawbits[:3] == [1, 1, 0]:
self.put(self.tss_start, self.tss_end, self.out_ann,
[1, ['Transmission start sequence', 'TSS']])
self.putg(self.ss_bit0, self.ss_bit0, [17, [str(self.rawbits[:3][0])]])
self.putg(self.ss_bit0, self.ss_bit0, [2, ['FSS', 'Frame start sequence']])
self.putg(self.ss_bit1, self.ss_bit1, [15, [str(self.rawbits[:3][1])]])
self.putg(self.ss_bit2, self.ss_bit2, [15, [str(self.rawbits[:3][2])]])
self.putx([17, [str(fr_rx)]])
self.putx([3, ['Reserved bit: %d' % fr_rx, 'RB: %d' % fr_rx, 'RB']])
else:
self.put(self.tss_start, self.tss_end, self.out_ann,
[20, ['Collision avoidance symbol', 'CAS']])
self.reset_variables()
# TODO: warning, if sequence is neither [1, 1, 0] nor [1, 1, 1]
# Bit 2: Payload preamble indicator. Must be 0 if null frame indicator is 0.
elif bitnum == 2:
self.putx([4, ['Payload preamble indicator: %d' % fr_rx,
'PPI: %d' % fr_rx]])
# Bit 3: Null frame indicator (inversed)
elif bitnum == 3:
data_type = 'data frame' if fr_rx else 'null frame'
self.putx([5, ['Null frame indicator: %s' % data_type,
'NF: %d' % fr_rx, 'NF']])
# Bit 4: Sync frame indicator
# Must be 1 if startup frame indicator is 1.
elif bitnum == 4:
self.putx([6, ['Sync frame indicator: %d' % fr_rx,
'Sync: %d' % fr_rx, 'Sync']])
# Bit 5: Startup frame indicator
elif bitnum == 5:
self.putx([7, ['Startup frame indicator: %d' % fr_rx,
'Startup: %d' % fr_rx, 'Startup']])
# Remember start of ID (see below).
elif bitnum == 6:
self.ss_block = self.samplenum
# Bits 6-16: Frame identifier (ID[10..0])
# ID must NOT be 0.
elif bitnum == 16:
self.id = int(''.join(str(d) for d in self.bits[6:]), 2)
self.putb([8, ['Frame ID: %d' % self.id, 'ID: %d' % self.id,
'%d' % self.id]])
# Remember start of payload length (see below).
elif bitnum == 17:
self.ss_block = self.samplenum
        # Bits 17-23: Payload length (Length[6..0])
# Payload length in header is the half of the real payload size.
elif bitnum == 23:
self.payload_length = int(''.join(str(d) for d in self.bits[17:]), 2)
self.putb([9, ['Payload length: %d' % self.payload_length,
'Length: %d' % self.payload_length,
'%d' % self.payload_length]])
# Remember start of header CRC (see below).
elif bitnum == 24:
self.ss_block = self.samplenum
        # Bits 24-34: Header CRC (11-bit) (HCRC[10..0])
# Calculation of header CRC is equal on both channels.
elif bitnum == 34:
bits = ''.join([str(b) for b in self.bits[4:24]])
header_to_check = int(bits, 2)
expected_crc = self.crc(header_to_check, len(bits),
Const.cHCrcPolynomial, Const.cHCrcSize, Const.cHCrcInit)
self.header_crc = int(''.join(str(d) for d in self.bits[24:]), 2)
crc_ok = self.header_crc == expected_crc
crc_ann = "OK" if crc_ok else "bad"
self.putb([10, ['Header CRC: 0x%X (%s)' % (self.header_crc, crc_ann),
'0x%X (%s)' % (self.header_crc, crc_ann),
'0x%X' % self.header_crc]])
# Remember start of cycle code (see below).
elif bitnum == 35:
self.ss_block = self.samplenum
        # Bits 35-40: Cycle code (Cyc[5..0])
# Cycle code. Must be between 0 and 63.
elif bitnum == 40:
self.cycle = int(''.join(str(d) for d in self.bits[35:]), 2)
self.putb([11, ['Cycle: %d' % self.cycle, 'Cyc: %d' % self.cycle,
'%d' % self.cycle]])
self.last_databit = 41 + 2 * self.payload_length * 8
# Remember all databyte bits, except the very last one.
elif bitnum in range(41, self.last_databit):
self.ss_databytebits.append(self.samplenum)
# Bits 41-X: Data field (0-254 bytes, depending on length)
# The bits within a data byte are transferred MSB-first.
elif bitnum == self.last_databit:
self.ss_databytebits.append(self.samplenum) # Last databyte bit.
for i in range(2 * self.payload_length):
x = 40 + (8 * i) + 1
b = int(''.join(str(d) for d in self.bits[x:x + 8]), 2)
ss = self.ss_databytebits[i * 8]
es = self.ss_databytebits[((i + 1) * 8) - 1]
self.putg(ss, es, [12, ['Data byte %d: 0x%02x' % (i, b),
'DB%d: 0x%02x' % (i, b), '%02X' % b]])
self.ss_databytebits = []
self.ss_block = self.samplenum # Remember start of trailer CRC.
        # Trailer CRC (24-bit) (CRC[23..0])
# Initialization vector of channel A and B are different, so CRCs are
# different for same data.
elif bitnum == self.last_databit + 23:
bits = ''.join([str(b) for b in self.bits[1:-24]])
frame_to_check = int(bits, 2)
iv = Const.cCrcInitA if self.options['channel_type'] == 'A' else Const.cCrcInitB
expected_crc = self.crc(frame_to_check, len(bits),
Const.cCrcPolynomial, Const.cCrcSize, iv=iv)
self.frame_crc = int(''.join(str(d) for d in self.bits[self.last_databit:]), 2)
crc_ok = self.frame_crc == expected_crc
crc_ann = "OK" if crc_ok else "bad"
self.putb([13, ['Frame CRC: 0x%X (%s)' % (self.frame_crc, crc_ann),
'0x%X (%s)' % (self.frame_crc, crc_ann),
'0x%X' % self.frame_crc]])
self.end_of_frame = True
# Remember start of frame end sequence (see below).
elif bitnum == self.last_databit + 24:
self.ss_block = self.samplenum
# Frame end sequence, must be 1 followed by 0.
elif bitnum == self.last_databit + 25:
self.putb([14, ['Frame end sequence', 'FES']])
# Check for DTS
elif bitnum == self.last_databit + 26:
if not fr_rx:
self.dynamic_frame = True
else:
self.last_xmit_bit = bitnum
self.ss_block = self.samplenum
# Remember start of channel idle delimiter (see below).
elif bitnum == self.last_xmit_bit:
self.ss_block = self.samplenum
        # Channel idle delimiter (CID[10..0])
elif bitnum == self.last_xmit_bit + Const.cChannelIdleDelimiter - 1:
self.putb([18, ['Channel idle delimiter', 'CID']])
self.reset_variables()
# DTS if dynamic frame
elif bitnum > self.last_databit + 27:
if self.dynamic_frame:
if fr_rx:
if self.last_xmit_bit == 999:
self.putb([19, ['Dynamic trailing sequence', 'DTS']])
self.last_xmit_bit = bitnum + 1
self.ss_block = self.samplenum
self.curbit += 1
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
# State machine.
if self.state == 'IDLE':
# Wait for a dominant state (logic 0) on the bus.
(fr_rx,) = self.wait({0: 'l'})
self.tss_start = self.samplenum
(fr_rx,) = self.wait({0: 'h'})
self.tss_end = self.samplenum
self.dom_edge_seen(force = True)
self.state = 'GET BITS'
elif self.state == 'GET BITS':
# Wait until we're in the correct bit/sampling position.
pos = self.get_sample_point(self.curbit)
(fr_rx,) = self.wait([{'skip': pos - self.samplenum}, {0: 'f'}])
if self.matched[1]:
self.dom_edge_seen()
if self.matched[0]:
self.handle_bit(fr_rx)
| gpl-3.0 | 4,628,058,687,782,680,000 | 38.181598 | 92 | 0.543938 | false |
sekikn/bigtop | bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions/parseBenchmark.py | 7 | 1460 | #!/usr/local/sbin/charm-env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple script to parse benchmark transaction results
and reformat them as JSON for sending back to juju
"""
import sys
import json
from charmhelpers.core import hookenv
import re
def parse_benchmark_output():
"""
Parse the output from the benchmark and set the action results:
"""
results = {}
# Find all of the interesting things
regex = re.compile('\t+(.*)=(.*)')
for line in sys.stdin.readlines():
m = regex.match(line)
if m:
results[m.group(1)] = m.group(2)
hookenv.action_set({"meta.raw": json.dumps(results)})
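# Illustrative example (assumed input format, not from the original script): a
# counter line such as "\t\tHDFS: Number of bytes read=9102751" matches the regex
# above and becomes results["HDFS: Number of bytes read"] = "9102751" before being
# serialised into the "meta.raw" action result.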
if __name__ == "__main__":
parse_benchmark_output()
| apache-2.0 | 2,337,092,406,438,506,000 | 32.181818 | 74 | 0.713014 | false |
AndreasMadsen/tensorflow | tensorflow/contrib/learn/python/learn/monitors_test.py | 14 | 28403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import shutil
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import testing
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
class _MyEveryN(learn.monitors.EveryN):
def __init__(self, every_n_steps=100, first_n_steps=1):
super(_MyEveryN, self).__init__(
every_n_steps=every_n_steps, first_n_steps=first_n_steps)
self._steps_begun = []
self._steps_ended = []
self._post_steps = []
@property
def steps_begun(self):
return self._steps_begun
@property
def steps_ended(self):
return self._steps_ended
@property
def post_steps(self):
return self._post_steps
def every_n_step_begin(self, step):
super(_MyEveryN, self).every_n_step_begin(step)
self._steps_begun.append(step)
return []
def every_n_step_end(self, step, outputs):
super(_MyEveryN, self).every_n_step_end(step, outputs)
self._steps_ended.append(step)
return False
def every_n_post_step(self, step, session):
super(_MyEveryN, self).every_n_post_step(step, session)
self._post_steps.append(step)
return False
class MonitorsTest(tf.test.TestCase):
"""Monitors tests."""
def setUp(self):
# Mock out logging calls so we can verify whether correct tensors are being
# monitored.
self._actual_log = logging.info
def mockLog(*args, **kwargs): # pylint: disable=invalid-name
self.logged_message = args
self._actual_log(*args, **kwargs)
logging.info = mockLog
def tearDown(self):
logging.info = self._actual_log
def _run_monitor(self, monitor, num_epochs=3, num_steps_per_epoch=10,
pass_max_steps=True):
if pass_max_steps:
max_steps = num_epochs * num_steps_per_epoch - 1
else:
max_steps = None
monitor.begin(max_steps=max_steps)
for epoch in xrange(num_epochs):
monitor.epoch_begin(epoch)
should_stop = False
step = epoch * num_steps_per_epoch
next_epoch_step = step + num_steps_per_epoch
while (not should_stop) and (step < next_epoch_step):
tensors = monitor.step_begin(step)
output = tf.get_default_session().run(tensors) if tensors else {}
output = dict(zip(
[t.name if isinstance(t, tf.Tensor) else t for t in tensors],
output))
should_stop = monitor.step_end(step=step, output=output)
monitor.post_step(step=step, session=None)
step += 1
monitor.epoch_end(epoch)
monitor.end()
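  # Note (illustrative summary, not part of the original test): the call order
  # exercised above is the monitor lifecycle contract:
  #   begin(max_steps); per epoch: epoch_begin(epoch); per step: step_begin(step)
  #   -> run(requested tensors) -> step_end(step, output) -> post_step(step, sess);
  #   then epoch_end(epoch); finally end().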
def test_base_monitor(self):
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(learn.monitors.BaseMonitor())
def test_every_0(self):
monitor = _MyEveryN(every_n_steps=0, first_n_steps=-1)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(30))
self.assertAllEqual(expected_steps, monitor.steps_begun)
self.assertAllEqual(expected_steps, monitor.steps_ended)
self.assertAllEqual(expected_steps, monitor.post_steps)
def test_every_1(self):
monitor = _MyEveryN(every_n_steps=1, first_n_steps=-1)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(1, 30))
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_2(self):
monitor = _MyEveryN(every_n_steps=2, first_n_steps=-1)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(2, 29, 2)) + [29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8_no_max_steps(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10,
pass_max_steps=False)
begin_end_steps = [0, 1, 2, 10, 18, 26]
post_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(begin_end_steps, monitor.steps_begun)
self.assertEqual(begin_end_steps, monitor.steps_ended)
self.assertEqual(post_steps, monitor.post_steps)
def test_every_8_recovered_after_step_begin(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
# It should call begin again since, end was not called
self.assertEqual([8, 8, 16, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_recovered_after_step_end(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_call_post_step_at_the_end(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(19)
monitor.step_end(19, output=None)
monitor.post_step(19, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16, 19], monitor.post_steps)
def test_every_8_call_post_step_should_not_be_called_twice(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(16)
monitor.step_end(16, output=None)
monitor.post_step(16, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_print(self):
with tf.Graph().as_default() as g, self.test_session(g):
t = tf.constant(42.0, name='foo')
self._run_monitor(learn.monitors.PrintTensor(tensor_names=[t.name]))
self.assertRegexpMatches(str(self.logged_message), t.name)
def test_logging_trainable(self):
with tf.Graph().as_default() as g, self.test_session(g):
var = tf.Variable(tf.constant(42.0), name='foo')
var.initializer.run()
cof = tf.constant(1.0)
loss = tf.sub(tf.mul(var, cof), tf.constant(1.0))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
tf.get_default_session().run(train_step)
self._run_monitor(learn.monitors.LoggingTrainable('foo'))
self.assertRegexpMatches(str(self.logged_message), var.name)
def test_summary_saver(self):
with tf.Graph().as_default() as g, self.test_session(g):
log_dir = 'log/dir'
summary_writer = testing.FakeSummaryWriter(log_dir, g)
var = tf.Variable(0.0)
var.initializer.run()
tensor = tf.assign_add(var, 1.0)
summary_op = tf.summary.scalar('my_summary', tensor)
self._run_monitor(
learn.monitors.SummarySaver(
summary_op=summary_op, save_steps=8,
summary_writer=summary_writer),
num_epochs=3, num_steps_per_epoch=10)
summary_writer.assert_summaries(
test_case=self, expected_logdir=log_dir, expected_graph=g,
expected_summaries={
0: {'my_summary': 1.0},
1: {'my_summary': 2.0},
9: {'my_summary': 3.0},
17: {'my_summary': 4.0},
25: {'my_summary': 5.0},
29: {'my_summary': 6.0},
})
def _assert_validation_monitor(
self, monitor, expected_early_stopped=False, expected_best_step=None,
expected_best_value=None):
self.assertEqual(expected_early_stopped, monitor.early_stopped)
self.assertEqual(expected_best_step, monitor.best_step)
self.assertEqual(expected_best_value, monitor.best_value)
def test_validation_monitor_no_estimator(self):
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
with tf.Graph().as_default() as g, self.test_session(g):
with self.assertRaisesRegexp(ValueError, 'set_estimator'):
self._run_monitor(monitor)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_no_ckpt(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
mock_latest_checkpoint.return_value = None
# Do nothing with no checkpoint.
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
mock_latest_checkpoint.assert_called_with(model_dir)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_no_early_stopping_rounds(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Do nothing with early_stopping_rounds=None.
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_invalid_metric(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Fail for missing metric.
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0, early_stopping_rounds=1)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
with self.assertRaisesRegexp(ValueError, 'missing from outputs'):
self._run_monitor(monitor, num_epochs=1, num_steps_per_epoch=1)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0, early_stopping_rounds=2)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
self.assertEqual(0, estimator.evaluate.call_count)
# Step 0, initial loss.
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0)
monitor.post_step(step=step, session=None)
# Step 1, same checkpoint, no eval.
step = 1
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0)
monitor.post_step(step=step, session=None)
# Step 2, lower loss.
step = 2
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 40.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(2, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0)
monitor.post_step(step=step, session=None)
# Step 3, higher loss.
step = 3
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 44.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(3, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0)
monitor.post_step(step=step, session=None)
# Step 4, higher loss for 2 steps, early stopping.
step = 4
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 43.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertTrue(monitor.step_end(step=step, output={}))
self.assertEqual(4, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_early_stopped=True, expected_best_step=2,
expected_best_value=40.0)
monitor.post_step(step=step, session=None)
monitor.epoch_end(epoch=0)
monitor.end()
def test_graph_dump(self):
monitor0 = learn.monitors.GraphDump()
monitor1 = learn.monitors.GraphDump()
with tf.Graph().as_default() as g, self.test_session(g):
const_var = tf.Variable(42.0, name='my_const')
counter_var = tf.Variable(0.0, name='my_counter')
assign_add = tf.assign_add(counter_var, 1.0, name='my_assign_add')
tf.global_variables_initializer().run()
self._run_monitor(monitor0, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 1.0,
assign_add.name: step + 1.0,
} for step in xrange(30)
}, monitor0.data)
self._run_monitor(monitor1, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 31.0,
assign_add.name: step + 31.0,
} for step in xrange(30)
}, monitor1.data)
for step in xrange(30):
matched, non_matched = monitor1.compare(monitor0, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 31.0, step + 1.0),
counter_var.name: (step + 31.0, step + 1.0),
}, non_matched)
matched, non_matched = monitor0.compare(monitor1, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 1.0, step + 31.0),
counter_var.name: (step + 1.0, step + 31.0),
}, non_matched)
def test_capture_variable(self):
monitor = learn.monitors.CaptureVariable(
var_name='my_assign_add:0', every_n=8, first_n=2)
with tf.Graph().as_default() as g, self.test_session(g):
var = tf.Variable(0.0, name='my_var')
var.initializer.run()
tf.assign_add(var, 1.0, name='my_assign_add')
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
0: 1.0,
1: 2.0,
2: 3.0,
10: 4.0,
18: 5.0,
26: 6.0,
29: 7.0,
}, monitor.values)
class StopAtStepTest(tf.test.TestCase):
def test_raise_in_both_last_step_and_num_steps(self):
with self.assertRaises(ValueError):
learn.monitors.StopAtStep(num_steps=10, last_step=20)
def test_stop_based_on_last_step(self):
m = learn.monitors.StopAtStep(last_step=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(9)
self.assertFalse(m.step_end(9, None))
m.step_begin(10)
self.assertTrue(m.step_end(10, None))
m.step_begin(11)
self.assertTrue(m.step_end(11, None))
def test_stop_based_on_num_step(self):
m = learn.monitors.StopAtStep(num_steps=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(13)
self.assertFalse(m.step_end(13, None))
m.step_begin(14)
self.assertTrue(m.step_end(14, None))
m.step_begin(15)
self.assertTrue(m.step_end(15, None))
class CheckpointSaverTest(tf.test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = tf.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = tf.contrib.framework.get_or_create_global_step()
self.train_op = tf.assign_add(self.global_step, 1)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def _run(self, monitor, step, train_op, sess):
monitor.step_begin(step)
sess.run(train_op)
monitor.post_step(step, sess)
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(
self.model_dir, save_secs=10, save_steps=20)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(self.model_dir)
def test_save_secs_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
# TODO(gunan): Reenable this test after b/32446874 is fixed.
def disabled_test_save_secs_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
time.sleep(2.5)
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
self._run(monitor, 5, self.train_op, sess)
# Not saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
time.sleep(2.5)
self._run(monitor, 6, self.train_op, sess)
# saved
self.assertEqual(6, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
# Not saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 5, self.train_op, sess)
# saved
self.assertEqual(5, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_saves_at_end(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
monitor.end(sess)
self.assertEqual(2, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
class FakeMonitor(learn.monitors.BaseMonitor):
def __init__(self):
learn.monitors.BaseMonitor.__init__(self)
self.should_stop = False
self.requested_tensors = []
self.call_counter = Counter()
self.last_begin_step = None
self.last_end_step = None
self.last_post_step = None
def begin(self, max_steps):
self.call_counter['begin'] += 1
def end(self, session):
self.call_counter['end'] += 1
def step_begin(self, step):
self.call_counter['step_begin'] += 1
self.last_begin_step = step
return self.requested_tensors
def step_end(self, step, output):
self.call_counter['step_end'] += 1
self.last_end_step = step
self.output = output
return self.should_stop
def post_step(self, step, session):
self.call_counter['post_step'] += 1
self.last_post_step = step
self.session = session
class RunHookAdapterForMonitorsTest(tf.test.TestCase):
def test_calls_and_steps(self):
with tf.Graph().as_default(), tf.Session() as sess:
global_step_tensor = tf.contrib.framework.create_global_step()
inc_5 = tf.assign_add(global_step_tensor, 5)
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['begin'], 1)
sess.run(tf.global_variables_initializer())
sess.run(global_step_tensor.assign(10))
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 11)
self.assertEqual(mon.last_end_step, 11)
self.assertEqual(mon.last_post_step, 11)
self.assertEqual(mon.call_counter['step_end'], 1)
self.assertEqual(mon.call_counter['step_begin'], 1)
self.assertEqual(mon.call_counter['post_step'], 1)
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 16)
self.assertEqual(mon.last_end_step, 16)
self.assertEqual(mon.last_post_step, 16)
self.assertEqual(mon.call_counter['step_end'], 2)
self.assertEqual(mon.call_counter['step_begin'], 2)
self.assertEqual(mon.call_counter['post_step'], 2)
hook.end(sess)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['end'], 1)
def test_requests(self):
with tf.Graph().as_default(), tf.Session() as sess:
tf.contrib.framework.create_global_step()
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
a_tensor = tf.constant([0], name='a_tensor')
tf.constant([5], name='another_tensor')
tf.constant([10], name='third_tensor')
mock_mon.requested_tensors = ['another_tensor']
mock_mon2.requested_tensors = ['third_tensor']
sess.run(tf.global_variables_initializer())
output = mon_sess.run(a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_mon.output['another_tensor'], [5])
self.assertEqual(mock_mon2.output['third_tensor'], [10])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 972,894,082,987,976,000 | 37.908219 | 131 | 0.652713 | false |
koyuawsmbrtn/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/win32comext/shell/demos/servers/folder_view.py | 37 | 28883 | # This is a port of the Vista SDK "FolderView" sample, and associated
# notes at http://shellrevealed.com/blogs/shellblog/archive/2007/03/15/Shell-Namespace-Extension_3A00_-Creating-and-Using-the-System-Folder-View-Object.aspx
# A key difference to shell_view.py is that this version uses the default
# IShellView provided by the shell (via SHCreateShellFolderView) rather
# than our own.
# XXX - sadly, it doesn't work quite like the original sample. Oh well,
# another day...
import sys
import os
import pickle
import random
import win32api
import winxpgui as win32gui # this needs vista, let alone xp!
import win32con
import winerror
import commctrl
import pythoncom
from win32com.util import IIDToInterfaceName
from win32com.server.exception import COMException
from win32com.server.util import wrap as _wrap
from win32com.server.util import NewEnum as _NewEnum
from win32com.shell import shell, shellcon
from win32com.axcontrol import axcontrol # IObjectWithSite
from win32com.propsys import propsys
GUID=pythoncom.MakeIID
# If set, output spews to the win32traceutil collector...
debug=0
# wrap a python object in a COM pointer
def wrap(ob, iid=None):
return _wrap(ob, iid, useDispatcher=(debug>0))
def NewEnum(seq, iid):
return _NewEnum(seq, iid=iid, useDispatcher=(debug>0))
# The sample makes heavy use of "string ids" (ie, integer IDs defined in .h
# files, loaded at runtime from a (presumably localized) DLL. We cheat.
_sids = {} # strings, indexed by string_id
def LoadString(sid):
return _sids[sid]
# fn to create a unique string ID
_last_ids = 0
def _make_ids(s):
global _last_ids
_last_ids += 1
_sids[_last_ids] = s
return _last_ids
# These strings are what the user sees and would be localized.
# XXX - it's possible that the shell might persist these values, so
# this scheme wouldn't really be suitable in a real ap.
IDS_UNSPECIFIED = _make_ids("unspecified")
IDS_SMALL = _make_ids("small")
IDS_MEDIUM = _make_ids("medium")
IDS_LARGE = _make_ids("large")
IDS_CIRCLE = _make_ids("circle")
IDS_TRIANGLE = _make_ids("triangle")
IDS_RECTANGLE = _make_ids("rectangle")
IDS_POLYGON = _make_ids("polygon")
IDS_DISPLAY = _make_ids("Display")
IDS_DISPLAY_TT = _make_ids("Display the item.")
IDS_SETTINGS = _make_ids("Settings")
IDS_SETTING1 = _make_ids("Setting 1")
IDS_SETTING2 = _make_ids("Setting 2")
IDS_SETTING3 = _make_ids("Setting 3")
IDS_SETTINGS_TT = _make_ids("Modify settings.")
IDS_SETTING1_TT = _make_ids("Modify setting 1.")
IDS_SETTING2_TT = _make_ids("Modify setting 2.")
IDS_SETTING3_TT = _make_ids("Modify setting 3.")
IDS_LESSTHAN5 = _make_ids("Less Than 5")
IDS_5ORGREATER = _make_ids("Five or Greater")
del _make_ids, _last_ids
# Other misc resource stuff
IDI_ICON1 = 100
IDI_SETTINGS = 101
# The sample defines a number of "category ids". Each one gets
# its own GUID.
CAT_GUID_NAME=GUID("{de094c9d-c65a-11dc-ba21-005056c00008}")
CAT_GUID_SIZE=GUID("{de094c9e-c65a-11dc-ba21-005056c00008}")
CAT_GUID_SIDES=GUID("{de094c9f-c65a-11dc-ba21-005056c00008}")
CAT_GUID_LEVEL=GUID("{de094ca0-c65a-11dc-ba21-005056c00008}")
# The next category guid is NOT based on a column (see
# ViewCategoryProvider::EnumCategories()...)
CAT_GUID_VALUE="{de094ca1-c65a-11dc-ba21-005056c00008}"
GUID_Display=GUID("{4d6c2fdd-c689-11dc-ba21-005056c00008}")
GUID_Settings=GUID("{4d6c2fde-c689-11dc-ba21-005056c00008}")
GUID_Setting1=GUID("{4d6c2fdf-c689-11dc-ba21-005056c00008}")
GUID_Setting2=GUID("{4d6c2fe0-c689-11dc-ba21-005056c00008}")
GUID_Setting3=GUID("{4d6c2fe1-c689-11dc-ba21-005056c00008}")
# Hrm - not sure what to do about the std keys.
# Probably need a simple parser for propkey.h
PKEY_ItemNameDisplay = ("{B725F130-47EF-101A-A5F1-02608C9EEBAC}", 10)
PKEY_PropList_PreviewDetails = ("{C9944A21-A406-48FE-8225-AEC7E24C211B}", 8)
# Not sure what the "3" here refers to - docs say PID_FIRST_USABLE (2) should be
# used. Presumably it is the 'propID' value in the .propdesc file!
# note that the following GUIDs are also references in the .propdesc file
PID_SOMETHING=3
# These are our 'private' PKEYs
# Col 2, name="Sample.AreaSize"
PKEY_Sample_AreaSize=("{d6f5e341-c65c-11dc-ba21-005056c00008}", PID_SOMETHING)
# Col 3, name="Sample.NumberOfSides"
PKEY_Sample_NumberOfSides = ("{d6f5e342-c65c-11dc-ba21-005056c00008}", PID_SOMETHING)
# Col 4, name="Sample.DirectoryLevel"
PKEY_Sample_DirectoryLevel = ("{d6f5e343-c65c-11dc-ba21-005056c00008}", PID_SOMETHING)
# We construct a PIDL from a pickle of a dict - turn it back into a
# dict (we should *never* be called with a PIDL that the last elt is not
# ours, so it is safe to assume we created it (assume->"ass" = "u" + "me" :)
def pidl_to_item(pidl):
# Note that only the *last* elt in the PIDL is certainly ours,
# but it contains everything we need encoded as a dict.
return pickle.loads(pidl[-1])
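# For illustration only: each PIDL element above decodes to a dict of the shape
# built by make_item_enum() below (the concrete values here are made up):
#   {'name': 'four', 'size': 42, 'sides': 3, 'level': 1, 'is_folder': False}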
# Start of msdn sample port...
# make_item_enum replaces the sample's entire EnumIDList.cpp :)
def make_item_enum(level, flags):
pidls = []
    nums = """zero one two three four five six seven eight nine ten""".split()
for i, name in enumerate(nums):
size = random.randint(0,255)
sides = 1
while sides in [1,2]:
sides = random.randint(0,5)
is_folder = (i % 2) != 0
        # check the flags to see if we should include it.
        # (If SHCONTF_STORAGE is requested we include everything; otherwise we
        # filter on the folder/non-folder flags below.)
skip = False
if not (flags & shellcon.SHCONTF_STORAGE):
if is_folder:
skip = not (flags & shellcon.SHCONTF_FOLDERS)
else:
skip = not (flags & shellcon.SHCONTF_NONFOLDERS)
if not skip:
data = dict(name=name, size=size, sides=sides, level=level, is_folder=is_folder)
pidls.append([pickle.dumps(data)])
return NewEnum(pidls, shell.IID_IEnumIDList)
# start of Utils.cpp port
def DisplayItem(shell_item_array, hwnd_parent=0):
# Get the first ShellItem and display its name
if shell_item_array is None:
msg = "You must select something!"
else:
si = shell_item_array.GetItemAt(0)
name = si.GetDisplayName(shellcon.SIGDN_NORMALDISPLAY)
msg = "%d items selected, first is %r" % (shell_item_array.GetCount(), name)
win32gui.MessageBox(hwnd_parent, msg, "Hello", win32con.MB_OK)
# end of Utils.cpp port
# start of sample's FVCommands.cpp port
class Command:
def __init__(self, guid, ids, ids_tt, idi, flags, callback, children):
self.guid = guid; self.ids = ids; self.ids_tt = ids_tt
self.idi = idi; self.flags = flags; self.callback = callback;
self.children = children
assert not children or isinstance(children[0], Command)
def tuple(self):
return self.guid, self.ids, self.ids_tt, self.idi, self.flags, self.callback, self.children
# command callbacks - called back directly by us - see ExplorerCommand.Invoke
def onDisplay(items, bindctx):
DisplayItem(items)
def onSetting1(items, bindctx):
win32gui.MessageBox(0, LoadString(IDS_SETTING1), "Hello", win32con.MB_OK)
def onSetting2(items, bindctx):
win32gui.MessageBox(0, LoadString(IDS_SETTING2), "Hello", win32con.MB_OK)
def onSetting3(items, bindctx):
win32gui.MessageBox(0, LoadString(IDS_SETTING3), "Hello", win32con.MB_OK)
taskSettings = [
Command(GUID_Setting1, IDS_SETTING1, IDS_SETTING1_TT, IDI_SETTINGS, 0, onSetting1, None),
Command(GUID_Setting2, IDS_SETTING2, IDS_SETTING2_TT, IDI_SETTINGS, 0, onSetting2, None),
Command(GUID_Setting3, IDS_SETTING3, IDS_SETTING3_TT, IDI_SETTINGS, 0, onSetting3, None),
]
tasks = [
Command(GUID_Display, IDS_DISPLAY, IDS_DISPLAY_TT, IDI_ICON1, 0, onDisplay, None ),
Command(GUID_Settings, IDS_SETTINGS, IDS_SETTINGS_TT, IDI_SETTINGS, shellcon.ECF_HASSUBCOMMANDS, None, taskSettings),
]
class ExplorerCommandProvider:
_com_interfaces_ = [shell.IID_IExplorerCommandProvider]
_public_methods_ = shellcon.IExplorerCommandProvider_Methods
def GetCommands(self, site, iid):
items = [wrap(ExplorerCommand(t)) for t in tasks]
return NewEnum(items, shell.IID_IEnumExplorerCommand)
class ExplorerCommand:
_com_interfaces_ = [shell.IID_IExplorerCommand]
_public_methods_ = shellcon.IExplorerCommand_Methods
def __init__(self, cmd):
self.cmd = cmd
# The sample also appears to ignore the pidl args!?
def GetTitle(self, pidl):
return LoadString(self.cmd.ids)
def GetToolTip(self, pidl):
return LoadString(self.cmd.ids_tt)
def GetIcon(self, pidl):
# Return a string of the usual "dll,resource_id" format
# todo - just return any ".ico that comes with python" + ",0" :)
raise COMException(hresult=winerror.E_NOTIMPL)
def GetState(self, shell_items, slow_ok):
return shellcon.ECS_ENABLED
def GetFlags(self):
return self.cmd.flags
def GetCanonicalName(self):
return self.cmd.guid
def Invoke(self, items, bind_ctx):
# If no function defined - just return S_OK
if self.cmd.callback:
self.cmd.callback(items, bind_ctx)
else:
print "No callback for command ", LoadString(self.cmd.ids)
def EnumSubCommands(self):
if not self.cmd.children:
return None
items = [wrap(ExplorerCommand(c))
for c in self.cmd.children]
return NewEnum(items, shell.IID_IEnumExplorerCommand)
# end of sample's FVCommands.cpp port
# start of sample's Category.cpp port
class FolderViewCategorizer:
_com_interfaces_ = [shell.IID_ICategorizer]
_public_methods_ = shellcon.ICategorizer_Methods
description = None # subclasses should set their own
def __init__(self, shell_folder):
self.sf = shell_folder
# Determines the relative order of two items in their item identifier lists.
def CompareCategory(self, flags, cat1, cat2):
return cat1-cat2
# Retrieves the name of a categorizer, such as "Group By Device
# Type", that can be displayed in the user interface.
def GetDescription(self, cch):
return self.description
# Retrieves information about a category, such as the default
# display and the text to display in the user interface.
def GetCategoryInfo(self, catid):
# Note: this isn't always appropriate! See overrides below
return 0, str(catid) # ????
class FolderViewCategorizer_Name(FolderViewCategorizer):
description = "Alphabetical"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
val = self.sf.GetDetailsEx(pidl, PKEY_ItemNameDisplay)
ret.append(val)
return ret
class FolderViewCategorizer_Size(FolderViewCategorizer):
description = "Group By Size"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
# Why don't we just get the size of the PIDL?
val = self.sf.GetDetailsEx(pidl, PKEY_Sample_AreaSize)
val = int(val) # it probably came in a VT_BSTR variant
if val < 255//3:
cid = IDS_SMALL
elif val < 2 * 255 // 3:
cid = IDS_MEDIUM
else:
cid = IDS_LARGE
ret.append(cid)
return ret
def GetCategoryInfo(self, catid):
return 0, LoadString(catid)
class FolderViewCategorizer_Sides(FolderViewCategorizer):
description = "Group By Sides"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
            val = self.sf.GetDetailsEx(pidl, PKEY_Sample_NumberOfSides)
            val = int(val) # it probably came in a VT_BSTR variant
            if val==0:
cid = IDS_CIRCLE
elif val==3:
cid = IDS_TRIANGLE
elif val==4:
cid = IDS_RECTANGLE
elif val==5:
cid = IDS_POLYGON
else:
cid = IDS_UNSPECIFIED
ret.append(cid)
return ret
def GetCategoryInfo(self, catid):
return 0, LoadString(catid)
class FolderViewCategorizer_Value(FolderViewCategorizer):
description = "Group By Value"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
val = self.sf.GetDetailsEx(pidl, PKEY_ItemNameDisplay)
if val in "one two three four".split():
ret.append(IDS_LESSTHAN5)
else:
ret.append(IDS_5ORGREATER)
return ret
def GetCategoryInfo(self, catid):
return 0, LoadString(catid)
class FolderViewCategorizer_Level(FolderViewCategorizer):
    description = "Group By Level"
def GetCategory(self, pidls):
return [self.sf.GetDetailsEx(pidl, PKEY_Sample_DirectoryLevel) for pidl in pidls]
class ViewCategoryProvider:
_com_interfaces_ = [shell.IID_ICategoryProvider]
_public_methods_ = shellcon.ICategoryProvider_Methods
def __init__(self, shell_folder):
self.shell_folder = shell_folder
def CanCategorizeOnSCID(self, pkey):
return pkey in [PKEY_ItemNameDisplay, PKEY_Sample_AreaSize,
PKEY_Sample_NumberOfSides, PKEY_Sample_DirectoryLevel]
# Creates a category object.
def CreateCategory(self, guid, iid):
if iid == shell.IID_ICategorizer:
if guid == CAT_GUID_NAME:
klass = FolderViewCategorizer_Name
elif guid == CAT_GUID_SIDES:
klass = FolderViewCategorizer_Sides
elif guid == CAT_GUID_SIZE:
klass = FolderViewCategorizer_Size
elif guid == CAT_GUID_VALUE:
klass = FolderViewCategorizer_Value
elif guid == CAT_GUID_LEVEL:
klass = FolderViewCategorizer_Level
else:
raise COMException(hresult=winerror.E_INVALIDARG)
return wrap(klass(self.shell_folder))
raise COMException(hresult=winerror.E_NOINTERFACE)
# Retrieves the enumerator for the categories.
def EnumCategories(self):
# These are additional categories beyond the columns
seq = [CAT_GUID_VALUE]
return NewEnum(seq, pythoncom.IID_IEnumGUID)
# Retrieves a globally unique identifier (GUID) that represents
# the categorizer to use for the specified Shell column.
def GetCategoryForSCID(self, scid):
if scid==PKEY_ItemNameDisplay:
guid = CAT_GUID_NAME
elif scid == PKEY_Sample_AreaSize:
guid = CAT_GUID_SIZE
elif scid == PKEY_Sample_NumberOfSides:
guid = CAT_GUID_SIDES
elif scid == PKEY_Sample_DirectoryLevel:
guid = CAT_GUID_LEVEL
elif scid == pythoncom.IID_NULL:
# This can be called with a NULL
# format ID. This will happen if you have a category,
# not based on a column, that gets stored in the
# property bag. When a return is made to this item,
# it will call this function with a NULL format id.
guid = CAT_GUID_VALUE
else:
raise COMException(hresult=winerror.E_INVALIDARG)
return guid
# Retrieves the name of the specified category. This is where
# additional categories that appear under the column
# related categories in the UI, get their display names.
def GetCategoryName(self, guid, cch):
if guid == CAT_GUID_VALUE:
return "Value"
raise COMException(hresult=winerror.E_FAIL)
# Enables the folder to override the default grouping.
def GetDefaultCategory(self):
return CAT_GUID_LEVEL, (pythoncom.IID_NULL, 0)
# end of sample's Category.cpp port
# start of sample's ContextMenu.cpp port
MENUVERB_DISPLAY = 0
folderViewImplContextMenuIDs = [
("display", MENUVERB_DISPLAY, 0, ),
]
class ContextMenu:
_reg_progid_ = "Python.ShellFolderSample.ContextMenu"
_reg_desc_ = "Python FolderView Context Menu"
_reg_clsid_ = "{fed40039-021f-4011-87c5-6188b9979764}"
_com_interfaces_ = [shell.IID_IShellExtInit, shell.IID_IContextMenu, axcontrol.IID_IObjectWithSite]
_public_methods_ = shellcon.IContextMenu_Methods + shellcon.IShellExtInit_Methods + ["GetSite", "SetSite"]
_context_menu_type_ = "PythonFolderViewSampleType"
def __init__(self):
self.site = None
self.dataobj = None
def Initialize(self, folder, dataobj, hkey):
self.dataobj = dataobj
def QueryContextMenu(self, hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags):
s = LoadString(IDS_DISPLAY);
win32gui.InsertMenu(hMenu, indexMenu, win32con.MF_BYPOSITION, idCmdFirst + MENUVERB_DISPLAY, s);
indexMenu += 1
# other verbs could go here...
# indicate that we added one verb.
return 1
def InvokeCommand(self, ci):
mask, hwnd, verb, params, dir, nShow, hotkey, hicon = ci
        # this seems very convoluted, but it's what the sample does :)
for verb_name, verb_id, flag in folderViewImplContextMenuIDs:
if isinstance(verb, int):
matches = verb==verb_id
else:
matches = verb==verb_name
if matches:
break
else:
assert False, ci # failed to find our ID
if verb_id == MENUVERB_DISPLAY:
sia = shell.SHCreateShellItemArrayFromDataObject(self.dataobj)
            DisplayItem(sia, hwnd)
else:
assert False, ci # Got some verb we weren't expecting?
def GetCommandString(self, cmd, typ):
raise COMException(hresult=winerror.E_NOTIMPL)
def SetSite(self, site):
self.site = site
def GetSite(self, iid):
return self.site
# end of sample's ContextMenu.cpp port
# start of sample's ShellFolder.cpp port
class ShellFolder:
_com_interfaces_ = [shell.IID_IBrowserFrameOptions,
pythoncom.IID_IPersist,
shell.IID_IPersistFolder,
shell.IID_IPersistFolder2,
shell.IID_IShellFolder,
shell.IID_IShellFolder2,
]
_public_methods_ = shellcon.IBrowserFrame_Methods + \
shellcon.IPersistFolder2_Methods + \
shellcon.IShellFolder2_Methods
_reg_progid_ = "Python.ShellFolderSample.Folder2"
_reg_desc_ = "Python FolderView sample"
_reg_clsid_ = "{bb8c24ad-6aaa-4cec-ac5e-c429d5f57627}"
max_levels = 5
def __init__(self, level=0):
self.current_level = level
self.pidl = None # set when Initialize is called
def ParseDisplayName(self, hwnd, reserved, displayName, attr):
#print "ParseDisplayName", displayName
raise COMException(hresult=winerror.E_NOTIMPL)
def EnumObjects(self, hwndOwner, flags):
if self.current_level >= self.max_levels:
return None
return make_item_enum(self.current_level+1, flags)
def BindToObject(self, pidl, bc, iid):
tail = pidl_to_item(pidl)
# assert tail['is_folder'], "BindToObject should only be called on folders?"
# *sob*
# No point creating object just to have QI fail.
if iid not in ShellFolder._com_interfaces_:
raise COMException(hresult=winerror.E_NOTIMPL)
child = ShellFolder(self.current_level+1)
# hrmph - not sure what multiple PIDLs here mean?
# assert len(pidl)==1, pidl # expecting just relative child PIDL
child.Initialize(self.pidl + pidl)
return wrap(child, iid)
def BindToStorage(self, pidl, bc, iid):
return self.BindToObject(pidl, bc, iid)
def CompareIDs(self, param, id1, id2):
return 0 # XXX - todo - implement this!
def CreateViewObject(self, hwnd, iid):
if iid == shell.IID_IShellView:
com_folder = wrap(self)
return shell.SHCreateShellFolderView(com_folder)
elif iid == shell.IID_ICategoryProvider:
return wrap(ViewCategoryProvider(self))
elif iid == shell.IID_IContextMenu:
ws = wrap(self)
dcm = (hwnd, None, self.pidl, ws, None)
return shell.SHCreateDefaultContextMenu(dcm, iid)
elif iid == shell.IID_IExplorerCommandProvider:
return wrap(ExplorerCommandProvider())
else:
raise COMException(hresult=winerror.E_NOINTERFACE)
def GetAttributesOf(self, pidls, attrFlags):
assert len(pidls)==1, "sample only expects 1 too!"
assert len(pidls[0])==1, "expect relative pidls!"
item = pidl_to_item(pidls[0])
flags = 0
if item['is_folder']:
flags |= shellcon.SFGAO_FOLDER
if item['level'] < self.max_levels:
flags |= shellcon.SFGAO_HASSUBFOLDER
return flags
# Retrieves an OLE interface that can be used to carry out
# actions on the specified file objects or folders.
def GetUIObjectOf(self, hwndOwner, pidls, iid, inout):
assert len(pidls)==1, "oops - arent expecting more than one!"
assert len(pidls[0])==1, "assuming relative pidls!"
item = pidl_to_item(pidls[0])
if iid == shell.IID_IContextMenu:
ws = wrap(self)
dcm = (hwndOwner, None, self.pidl, ws, pidls)
return shell.SHCreateDefaultContextMenu(dcm, iid)
elif iid == shell.IID_IExtractIconW:
dxi = shell.SHCreateDefaultExtractIcon()
# dxi is IDefaultExtractIconInit
if item['is_folder']:
dxi.SetNormalIcon("shell32.dll", 4)
else:
dxi.SetNormalIcon("shell32.dll", 1)
# just return the dxi - let Python QI for IID_IExtractIconW
return dxi
elif iid == pythoncom.IID_IDataObject:
return shell.SHCreateDataObject(self.pidl, pidls, None, iid);
elif iid == shell.IID_IQueryAssociations:
elts = []
if item['is_folder']:
elts.append((shellcon.ASSOCCLASS_FOLDER, None, None))
elts.append((shellcon.ASSOCCLASS_PROGID_STR, None, ContextMenu._context_menu_type_))
return shell.AssocCreateForClasses(elts, iid)
raise COMException(hresult=winerror.E_NOINTERFACE)
# Retrieves the display name for the specified file object or subfolder.
def GetDisplayNameOf(self, pidl, flags):
item = pidl_to_item(pidl)
if flags & shellcon.SHGDN_FORPARSING:
if flags & shellcon.SHGDN_INFOLDER:
return item['name']
else:
if flags & shellcon.SHGDN_FORADDRESSBAR:
sigdn = shellcon.SIGDN_DESKTOPABSOLUTEEDITING
else:
sigdn = shellcon.SIGDN_DESKTOPABSOLUTEPARSING
parent = shell.SHGetNameFromIDList(self.pidl, sigdn)
return parent + "\\" + item['name']
else:
return item['name']
def SetNameOf(self, hwndOwner, pidl, new_name, flags):
raise COMException(hresult=winerror.E_NOTIMPL)
def GetClassID(self):
return self._reg_clsid_
# IPersistFolder method
def Initialize(self, pidl):
self.pidl = pidl
# IShellFolder2 methods
def EnumSearches(self):
raise COMException(hresult=winerror.E_NOINTERFACE)
# Retrieves the default sorting and display columns.
def GetDefaultColumn(self, dwres):
# result is (sort, display)
return 0, 0
# Retrieves the default state for a specified column.
def GetDefaultColumnState(self, iCol):
if iCol < 3:
return shellcon.SHCOLSTATE_ONBYDEFAULT | shellcon.SHCOLSTATE_TYPE_STR
raise COMException(hresult=winerror.E_INVALIDARG)
# Requests the GUID of the default search object for the folder.
def GetDefaultSearchGUID(self):
raise COMException(hresult=winerror.E_NOTIMPL)
# Helper function for getting the display name for a column.
def _GetColumnDisplayName(self, pidl, pkey):
item = pidl_to_item(pidl)
is_folder = item['is_folder']
if pkey == PKEY_ItemNameDisplay:
val = item['name']
elif pkey == PKEY_Sample_AreaSize and not is_folder:
val = "%d Sq. Ft." % item['size']
elif pkey == PKEY_Sample_NumberOfSides and not is_folder:
val = str(item['sides']) # not sure why str()
elif pkey == PKEY_Sample_DirectoryLevel:
val = str(item['level'])
else:
val = ''
return val
# Retrieves detailed information, identified by a
# property set ID (FMTID) and property ID (PID),
# on an item in a Shell folder.
def GetDetailsEx(self, pidl, pkey):
item = pidl_to_item(pidl)
is_folder = item['is_folder']
if not is_folder and pkey == PKEY_PropList_PreviewDetails:
return "prop:Sample.AreaSize;Sample.NumberOfSides;Sample.DirectoryLevel"
return self._GetColumnDisplayName(pidl, pkey)
# Retrieves detailed information, identified by a
# column index, on an item in a Shell folder.
def GetDetailsOf(self, pidl, iCol):
key = self.MapColumnToSCID(iCol);
if pidl is None:
data = [(commctrl.LVCFMT_LEFT, "Name"),
(commctrl.LVCFMT_CENTER, "Size"),
(commctrl.LVCFMT_CENTER, "Sides"),
(commctrl.LVCFMT_CENTER, "Level"),]
if iCol >= len(data):
raise COMException(hresult=winerror.E_FAIL)
fmt, val = data[iCol]
else:
fmt = 0 # ?
val = self._GetColumnDisplayName(pidl, key)
cxChar = 24
return fmt, cxChar, val
# Converts a column name to the appropriate
# property set ID (FMTID) and property ID (PID).
def MapColumnToSCID(self, iCol):
data = [PKEY_ItemNameDisplay, PKEY_Sample_AreaSize,
PKEY_Sample_NumberOfSides, PKEY_Sample_DirectoryLevel]
if iCol >= len(data):
raise COMException(hresult=winerror.E_FAIL)
return data[iCol]
# IPersistFolder2 methods
# Retrieves the PIDLIST_ABSOLUTE for the folder object.
def GetCurFolder(self):
        # The docs say this is OK, but I suspect it's a problem in this case :)
#assert self.pidl, "haven't been initialized?"
return self.pidl
# end of sample's ShellFolder.cpp port
def get_schema_fname():
me = win32api.GetFullPathName(__file__)
sc = os.path.splitext(me)[0] + ".propdesc"
assert os.path.isfile(sc), sc
return sc
def DllRegisterServer():
import _winreg
if sys.getwindowsversion()[0] < 6:
print "This sample only works on Vista"
sys.exit(1)
key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\" \
"Explorer\\Desktop\\Namespace\\" + \
ShellFolder._reg_clsid_)
_winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, ShellFolder._reg_desc_)
# And special shell keys under our CLSID
key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
"CLSID\\" + ShellFolder._reg_clsid_ + "\\ShellFolder")
# 'Attributes' is an int stored as a binary! use struct
attr = shellcon.SFGAO_FOLDER | shellcon.SFGAO_HASSUBFOLDER | \
shellcon.SFGAO_BROWSABLE
import struct
s = struct.pack("i", attr)
_winreg.SetValueEx(key, "Attributes", 0, _winreg.REG_BINARY, s)
# register the context menu handler under the FolderViewSampleType type.
keypath = "%s\\shellex\\ContextMenuHandlers\\%s" % (ContextMenu._context_menu_type_, ContextMenu._reg_desc_)
key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT, keypath)
_winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, ContextMenu._reg_clsid_)
propsys.PSRegisterPropertySchema(get_schema_fname())
print ShellFolder._reg_desc_, "registration complete."
def DllUnregisterServer():
import _winreg
paths = [
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Desktop\\Namespace\\" + ShellFolder._reg_clsid_,
"%s\\shellex\\ContextMenuHandlers\\%s" % (ContextMenu._context_menu_type_, ContextMenu._reg_desc_),
]
for path in paths:
try:
_winreg.DeleteKey(_winreg.HKEY_LOCAL_MACHINE, path)
except WindowsError, details:
import errno
if details.errno != errno.ENOENT:
print "FAILED to remove %s: %s" % (path, details)
propsys.PSUnregisterPropertySchema(get_schema_fname())
print ShellFolder._reg_desc_, "unregistration complete."
if __name__=='__main__':
from win32com.server import register
register.UseCommandLine(ShellFolder, ContextMenu,
debug = debug,
finalize_register = DllRegisterServer,
finalize_unregister = DllUnregisterServer)
| gpl-2.0 | -7,067,755,187,175,611,000 | 38.729023 | 156 | 0.642662 | false |
swaprava/matching | advisor_matching_cgi.py | 1 | 3624 | #!/usr/bin/env python
"""
Created on Thu Jul 16 19:57:23 2015
Build a webpage where the inputs are provided by the user
This program runs and matches students to advisors using
the Gale-Shapley deferred acceptance algorithm
@author: swaprava
"""
import cgi
import cgitb
cgitb.enable()
print 'Content-type:text/html\r\n\r\n'
print '<html>'
print '<head><title>Deferred acceptance matching</title></head>'
print '<body>'
print '<br /><h3>Gale-Shapley deferred acceptance matching advisors to students</h3>'
print 'This application takes individual student\'s and advisor\'s preferences as input.<br />Additionally, it also asks for advisors\' capacities and returns the matching<br />of the students to advisors using the algorithm.'
studentAdvisorinfoGiven = False
preferencesGiven = False
form = cgi.FieldStorage()
if form.getvalue('numOfstudents'):
if form.getvalue('numOfadvisors'):
numOfstudents = int(form.getvalue('numOfstudents'))
numOfadvisors = int(form.getvalue('numOfadvisors'))
studentAdvisorinfoGiven = True
if not preferencesGiven:
print '<p><i>Now the fun begins!</i></p>'
print 'Give the preferences:<br />'
print 'Enter the preferences of the students and advisors in comma separated form<br />'
print 'Numbers denote both the students and advisors<br />'
    print 'Example: (for 4 advisors) <b>4,3,1,2</b> denotes the student preference over the advisors<br />'
    print 'similarly (for 5 students) <b>3,2,1,5,4</b> denotes the advisor preference over the students<br />'
print '<br/><h4>Students and their preferences</h4>'
print '<form method="post">'
for student in xrange(numOfstudents):
pref = 'studpref' + str(student)
name = 'studname' + str(student)
print '<p>Enter name and preference of Student %d:<br />   Name: <input type="text" name="%s" required> Preference: <input type="text" name="%s" required></p>' % (student+1, name, pref)
print '<br/><h4>Advisors, their preferences, and capacities</h4>'
for advisor in xrange(numOfadvisors):
pref = 'advpref' + str(advisor)
name = 'advname' + str(advisor)
capacity = 'advcapa' + str(advisor)
print '<p>Enter name and preference of Advisor %d:<br />   Name: <input type="text" name="%s" required> Preference: <input type="text" name="%s" required> Capacity: <input type="text" size="3" name="%s" required></p>' % (advisor+1, name, pref, capacity)
print '<input type="hidden" name="numOfstudents" value="%s">' % numOfstudents
print '<input type="hidden" name="numOfadvisors" value="%s">' % numOfadvisors
print '<br/><input type="submit" formaction="advisor_matching_stud_prop.py" value="Match using student proposing version">'
# print ' '
# print '<input type="submit" formaction="" value="Match using advisor proposing version">'
print '</form>'
if not studentAdvisorinfoGiven:
print '<form method="post" action=advisor_matching_cgi.py>'
print '<p>Enter the number of students   <input type="text" name="numOfstudents" required></p>'
print '<p>Enter the number of advisors   <input type="text" name="numOfadvisors" required></p>'
print '<input type="submit" value="Submit">'
print '</form>'
print '</body>'
print '</html>' | gpl-2.0 | 6,272,962,804,356,684,000 | 44.886076 | 288 | 0.633278 | false |
Just-D/panda3d | direct/src/plugin_npapi/make_osx_bundle.py | 11 | 3004 | #! /usr/bin/env python
"""
This script constructs the bundle directory structure for the OSX web
plugin that is built by the code in this directory. It takes no
parameters, and produces the plugin bundle in the same place.
"""
import getopt
import sys
import os
import glob
import shutil
import direct
from pandac.PandaModules import Filename, DSearchPath
def usage(code, msg = ''):
print >> sys.stderr, __doc__
print >> sys.stderr, msg
sys.exit(code)
def makeBundle(startDir):
fstartDir = Filename.fromOsSpecific(startDir)
    # Search for nppanda3d along $LD_LIBRARY_PATH / $DYLD_LIBRARY_PATH.
path = DSearchPath()
if 'LD_LIBRARY_PATH' in os.environ:
path.appendPath(os.environ['LD_LIBRARY_PATH'])
if 'DYLD_LIBRARY_PATH' in os.environ:
path.appendPath(os.environ['DYLD_LIBRARY_PATH'])
nppanda3d = path.findFile('nppanda3d')
if not nppanda3d:
raise StandardError, "Couldn't find nppanda3d on path."
# Generate the bundle directory structure
rootFilename = Filename(fstartDir, 'bundle')
if os.path.exists(rootFilename.toOsSpecific()):
shutil.rmtree(rootFilename.toOsSpecific())
bundleFilename = Filename(rootFilename, 'nppanda3d.plugin')
plistFilename = Filename(bundleFilename, 'Contents/Info.plist')
plistFilename.makeDir()
exeFilename = Filename(bundleFilename, 'Contents/MacOS/nppanda3d')
exeFilename.makeDir()
resourceFilename = Filename(bundleFilename, 'Contents/Resources/nppanda3d.rsrc')
resourceFilename.makeDir()
# Compile the .r file to an .rsrc file.
os.system('/Developer/Tools/Rez -useDF -o %s %s' % (
resourceFilename.toOsSpecific(), Filename(fstartDir, "nppanda3d.r").toOsSpecific()))
if not resourceFilename.exists():
raise IOError, 'Unable to run Rez'
# Copy in Info.plist and the compiled executable.
shutil.copyfile(Filename(fstartDir, "nppanda3d.plist").toOsSpecific(), plistFilename.toOsSpecific())
shutil.copyfile(nppanda3d.toOsSpecific(), exeFilename.toOsSpecific())
# All done!
bundleFilename.touch()
print bundleFilename.toOsSpecific()
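# For reference, makeBundle() above lays the plugin out as follows (paths taken
# directly from the code):
#   bundle/nppanda3d.plugin/Contents/Info.plist
#   bundle/nppanda3d.plugin/Contents/MacOS/nppanda3d
#   bundle/nppanda3d.plugin/Contents/Resources/nppanda3d.rsrc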
def buildDmg(startDir):
fstartDir = Filename.fromOsSpecific(startDir)
rootFilename = Filename(fstartDir, 'bundle')
output = Filename(fstartDir, 'nppanda3d.dmg')
output.unlink()
cmd = 'hdiutil create -fs HFS+ -srcfolder "%(dir)s" -volname "%(volname)s" "%(output)s"' % {
'dir' : rootFilename.toOsSpecific(),
'volname' : 'nppanda3d',
'output' : output.toOsSpecific(),
}
os.system(cmd)
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], 'h')
except getopt.error, msg:
usage(1, msg)
for opt, arg in opts:
if opt == '-h':
usage(0)
if args:
usage(1, 'No arguments are expected.')
startDir = os.path.split(sys.argv[0])[0]
makeBundle(startDir)
# We don't need the dmg these days; the installer is better.
#buildDmg(startDir)
| bsd-3-clause | 5,820,778,329,045,054,000 | 29.969072 | 104 | 0.675766 | false |
vhscampos/range-analysis | prototype/PythonRangeAnalysis/bck/RA7.py | 1 | 19058 | class InvalidIntervalException(Exception):
def __init__(self, l, u):
self.l = l
self.u = u
def __str__(self):
		return "Invalid interval [" + str(self.l) + ", " + str(self.u) + "]"
class ArithmeticException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def lt(x, y):
if x == y:
return False
elif x == '-' or y == '+':
return True
elif y == '-' or x == '+':
return False
else:
return x < y
def leq(x, y):
if x == y or x == '-' or y == '+':
return True
elif y == '-' or x == '+':
return False
else:
return x < y
def gt(x, y):
if x == y:
return False
elif x == '+' or y == '-':
return True
elif y == '+' or x == '-':
return False
else:
return x > y
def geq(x, y):
if x == y or x == '+' or y == '-':
return True
elif y == '+' or x == '-':
return False
else:
return x > y
def min(x, y):
if lt(x, y):
return x
else:
return y
def max(x, y):
if gt(x, y):
return x
else:
return y
def add(x, y):
if (x == '-' and y == '+') or (y == '-' and x == '+'):
raise ArithmeticException("Adding minus and plus infinity.")
elif x == '-' or y == '-':
return '-'
elif x == '+' or y == '+':
return '+'
else:
return x + y
def mul(x, y):
	if (x == 0 or y == 0):
return 0
elif (x == '-' and lt(y, 0)) or (y == '-' and lt(x, 0)):
return '+'
elif (x == '+' and gt(y, 0)) or (y == '+' and gt(x, 0)):
return '+'
elif (x == '-' and gt(y, 0)) or (y == '-' and gt(x, 0)):
return '-'
elif (x == '+' and lt(y, 0)) or (y == '+' and lt(x, 0)):
return '-'
else:
return x * y
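# A few concrete cases of the extended arithmetic above, where '-' and '+'
# stand for minus/plus infinity (shown only as illustration):
#   add(3, 4)    -> 7        add('-', 5) -> '-'      add(2, '+')  -> '+'
#   mul('-', -2) -> '+'      mul(0, '+') -> 0        min('-', 7)  -> '-'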
class Interval:
def __init__(self, l='-', u='+'):
if (l != '-' and u != '+'):
if (l > u):
raise InvalidIntervalException(l, u)
self.l = l;
self.u = u;
def intersection(self, i):
if (gt(self.l, i.u) or lt(self.u, i.l)):
return BottomInterval()
else:
return Interval(max(self.l, i.l), min(self.u, i.u))
def __str__(self):
return "[" + str(self.l) + ", " + str(self.u) + "]"
class SymbolicInterval(Interval):
"""This is an interval that contains a symbolic limit, which is given by
	the bounds of a program variable, e.g.: [-inf, ub(b) + 1]"""
def __init__(self, bound, op='=='):
self.bound = bound
self.op = op
self.l = '-'
self.u = '+'
def __str__(self):
if self.op == '==':
return "[lb(" + self.bound.name + "), ub(" + self.bound.name + ")]"
elif self.op == '<=':
return "[-, ub(" + self.bound.name + ")]"
elif self.op == '<':
return "[-, ub(" + self.bound.name + ") - 1]"
elif self.op == '>=':
return "[lb(" + self.bound.name + "), +]"
elif self.op == '>':
return "[lb(" + self.bound.name + ") + 1, +]"
else:
return "[" + str(self.l) + ", " + str(self.u) + "]"
class BottomInterval(Interval):
"""This interval is used to represent the empty interval. It arises, for
instance, from the intersection of disjoint intervals."""
def __str__(self):
return "[., .]"
class VarNode:
"""A VarNode represents a program variable."""
def __init__(self, name, interval=Interval()):
self.name = name
self.interval = interval
def __str__(self):
return self.name + str(self.interval)
class UnaryOp:
"""A constraint like sink = a * source + b \intersec [l, u]"""
def __init__(self, source, sink, a=1, b=0, interval=Interval('-', '+')):
self.source = source
self.sink = sink
self.a = a
self.b = b
self.i = interval
def __str__(self):
		self_str = str(self.sink) + " = " + str(self.a) + " * " + str(self.source)
		self_str += " + " + str(self.b) + " \int " + str(self.i)
return self_str
def eval(self):
"""Read the interval in source, apply the operation on it, and return it."""
l = add(mul(self.a, self.source.interval.l), self.b)
u = add(mul(self.a, self.source.interval.u), self.b)
auxInterval = Interval(l, u)
return auxInterval.intersection(self.i)
def fixIntersects(self):
"""Replace symbolic intervals with hard-wired constants."""
if isinstance(self.i, SymbolicInterval):
l = self.i.bound.interval.l
u = self.i.bound.interval.u
if self.i.op == '==':
self.i = Interval(l, u)
elif self.i.op == '<=':
self.i = Interval(self.i.l, u)
elif self.i.op == '<':
self.i = Interval(self.i.l, u - 1)
elif self.i.op == '>=':
self.i = Interval(l, self.i.u)
elif self.i.op == '>':
self.i = Interval(l + 1, self.i.u)
else:
self.i = Interval()
def toDotStr(self):
lb = " " + str(hash(self)) + " [shape=box,label =\""
space = ""
if self.a != 1:
lb += "*(" + str(self.a) + ")"
space = " "
if self.b != 0:
lb += space + "+(" + str(self.b) + ")"
space = " "
if isinstance(self.i, SymbolicInterval) or self.i.l != '-' or self.i.u != '+':
lb += space + "INT" + str(self.i)
lb += "\"]\n"
lb += " " + self.source.name + " -> " + str(hash(self)) + "\n"
lb += " " + str(hash(self)) + " -> " + self.sink.name + "\n"
return lb
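# UnaryOp encodes constraints of the form sink = a*source + b, further
# intersected with an interval.  For example, the first constraint of Graph 3
# below, UnaryOp(a, b, 1, 0, Interval('-', 100)), reads "b = a /\ [-inf, 100]":
# the intersection is how information from a branch condition on the path from
# a to b is encoded.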
class PlusOp:
"""A constraint like sink = src1 + src2"""
def __init__(self, src1, src2, sink):
self.src1 = src1
self.src2 = src2
self.sink = sink
def __str__(self):
return self.sink.name + " = " + self.src1.name + " + " + self.src2.name
def eval(self):
"""Read the interval in source, apply the operation on it, and return it."""
int1 = self.src1.interval
int2 = self.src2.interval
return Interval(add(int1.l, int2.l), add(int1.u, int2.u))
def fixIntersects(self):
"""Replace symbolic intervals with hard-wired constants. Normally this
kind of operations have no intersect to fix, but the method is here so
that we can invoke it on any kind of operation."""
def toDotStr(self):
lb = " " + str(hash(self)) + " [shape=box,label =\" + \"]\n"
lb += " " + self.src1.name + " -> " + str(hash(self)) + "\n"
lb += " " + self.src2.name + " -> " + str(hash(self)) + "\n"
lb += " " + str(hash(self)) + " -> " + self.sink.name + "\n"
return lb
class PhiOp:
"""A constraint like sink = phi(src1, src2)"""
def __init__(self, src1, src2, sink):
self.src1 = src1
self.src2 = src2
self.sink = sink
def __str__(self):
return self.sink.name + " =phi (" + str(self.src1) + ", " + str(self.src2) + ")"
def eval(self):
"""The result of evaluating the phi-function is the union of the ranges of
every variable used in the phi."""
int1 = self.src1.interval
int2 = self.src2.interval
# Remember, the union of bottom and anythin is anything:
if isinstance(int1, BottomInterval):
return Interval(int2.l, int2.u)
elif isinstance(int2, BottomInterval):
return Interval(int1.l, int1.u)
return Interval(min(int1.l, int2.l), max(int1.u, int2.u))
def fixIntersects(self):
"""Replace symbolic intervals with hard-wired constants. Normally this
kind of operations have no intersect to fix, but the method is here so
that we can invoke it on any kind of operation."""
def toDotStr(self):
lb = " " + str(hash(self)) + " [shape=box,label =\" phi \"]\n"
lb += " " + self.src1.name + " -> " + str(hash(self)) + "\n"
lb += " " + self.src2.name + " -> " + str(hash(self)) + "\n"
lb += " " + str(hash(self)) + " -> " + self.sink.name + "\n"
return lb
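# Each "Graph N" below is a hand-built constraint graph (variables, operations
# and a use map).  Graph 0, for instance, roughly corresponds to a loop of the
# form "k = 0; while ...: k = (k + 100) + 1" - reconstructed here from the
# constraints themselves, not from any original source program.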
# Graph 0:
Variables0 = {
'k' : VarNode('k', Interval(0, 0)),
'k0': VarNode('k0', Interval(100, 100)),
'k1': VarNode('k1', BottomInterval()),
'k2': VarNode('k2', BottomInterval())
}
Operations0 = [
PlusOp(Variables0['k0'], Variables0['k'], Variables0['k1']),
UnaryOp(Variables0['k1'], Variables0['k2'], b=1),
UnaryOp(Variables0['k2'], Variables0['k'])
]
UseMap0 = {
'k' : [Operations0[0]],
'k0': [Operations0[0]],
'k1': [Operations0[1]],
'k2': [Operations0[2]]
}
# Graph 1:
Variables1 = {
'k0': VarNode('k0', Interval(3, 3)),
'k1': VarNode('k1', Interval(5, 5)),
'k2': VarNode('k2', BottomInterval()),
'k3': VarNode('k3', Interval(7, 7)),
'k4': VarNode('k4', BottomInterval())
}
Operations1 = [
PlusOp(Variables1['k0'], Variables1['k1'], Variables1['k2']),
PlusOp(Variables1['k1'], Variables1['k3'], Variables1['k4']),
PlusOp(Variables1['k2'], Variables1['k4'], Variables1['k3'])
]
UseMap1 = {
'k0': [Operations1[0]],
'k1': [Operations1[0], Operations1[1]],
'k2': [Operations1[2]],
'k3': [Operations1[1]],
'k4': [Operations1[2]]
}
# Graph 2:
Variables2 = {
'i': VarNode('i', Interval(0, 0)),
'i1': VarNode('i1', BottomInterval()),
'i2': VarNode('i2', BottomInterval()),
'j': VarNode('j', Interval(100, 100)),
'j1': VarNode('j1', BottomInterval()),
'j2': VarNode('j2', BottomInterval()),
}
Operations2 = [
UnaryOp(Variables2['i'], Variables2['i1'], 1, 0, SymbolicInterval(Variables2['j'], '<')),
UnaryOp(Variables2['i1'], Variables2['i2'], b=1),
UnaryOp(Variables2['i2'], Variables2['i']),
UnaryOp(Variables2['j'], Variables2['j1'], 1, 0, SymbolicInterval(Variables2['i'], '>')),
UnaryOp(Variables2['j1'], Variables2['j2'], b = -1),
UnaryOp(Variables2['j2'], Variables2['j'])
]
UseMap2 = {
'i' : [Operations2[0]],
'i1': [Operations2[1]],
'i2': [Operations2[2]],
'j' : [Operations2[3]],
'j1': [Operations2[4]],
'j2': [Operations2[5]]
}
# Graph 3:
Variables3 = {
'a': VarNode('a', BottomInterval()),
'b': VarNode('b', Interval(0, 0)),
'c': VarNode('c', BottomInterval())
}
Operations3 = [
UnaryOp(Variables3['a'], Variables3['b'], 1, 0, Interval('-', 100)),
UnaryOp(Variables3['b'], Variables3['c'], 1, 0),
UnaryOp(Variables3['c'], Variables3['a'], 1, 1)
]
UseMap3 = {
'a': [Operations3[0]],
'b': [Operations3[1]],
'c': [Operations3[2]]
}
# Graph 4:
Variables4 = {
'a': VarNode('a', Interval(1, 1)),
'b': VarNode('b', Interval(0, 0)),
'c': VarNode('c', BottomInterval()),
'd': VarNode('d', BottomInterval()),
'e': VarNode('e', BottomInterval())
}
Operations4 = [
UnaryOp(Variables4['c'], Variables4['a'], 1, -1),
PlusOp(Variables4['a'], Variables4['b'], Variables4['c']),
UnaryOp(Variables4['c'], Variables4['d'], 1, 0, Interval(3, 7)),
UnaryOp(Variables4['c'], Variables4['e'], 1, 0, Interval('-', 10)),
PhiOp(Variables4['d'], Variables4['e'], Variables4['b'])
]
UseMap4 = {
'a': [Operations4[1]],
'b': [Operations4[1]],
'c': [Operations4[0], Operations4[2], Operations4[3]],
'd': [Operations4[4]],
'e': [Operations4[4]]
}
# Graph 5:
Variables5 = {
'i0': VarNode('i0', Interval(0, 0)),
'i1': VarNode('i1', BottomInterval()),
'i2': VarNode('i2', BottomInterval()),
'i3': VarNode('i3', BottomInterval()),
'i4': VarNode('i4', BottomInterval()),
'i5': VarNode('i5', BottomInterval()),
'i6': VarNode('i6', BottomInterval()),
'i7': VarNode('i7', BottomInterval())
}
Operations5 = [
PhiOp(Variables5['i0'], Variables5['i6'], Variables5['i1']),
UnaryOp(Variables5['i1'], Variables5['i2'], 1, 0, Interval('-', 41)),
UnaryOp(Variables5['i1'], Variables5['i4'], 1, 0, Interval(42, '+')),
UnaryOp(Variables5['i2'], Variables5['i3'], 1, 1),
UnaryOp(Variables5['i4'], Variables5['i7'], 1, 0, Interval(43, '+')),
UnaryOp(Variables5['i7'], Variables5['i5'], 1, 1),
PhiOp(Variables5['i3'], Variables5['i5'], Variables5['i6'])
]
UseMap5 = {
'i0': [Operations5[0]],
'i1': [Operations5[1], Operations5[2]],
'i2': [Operations5[3]],
'i3': [Operations5[6]],
'i4': [Operations5[4]],
'i5': [Operations5[6]],
'i6': [Operations5[0]],
'i7': [Operations5[5]]
}
# Graph 6:
Variables6 = {
'b0': VarNode('b0', BottomInterval()),
'b1': VarNode('b1', BottomInterval()),
'b2': VarNode('b2', BottomInterval()),
'b3': VarNode('b3', BottomInterval()),
'b4': VarNode('b4', BottomInterval()),
'b5': VarNode('b5', Interval(1, 1)),
'b6': VarNode('b6', BottomInterval())
}
Operations6 = [
PhiOp(Variables6['b5'], Variables6['b4'], Variables6['b2']),
PhiOp(Variables6['b3'], Variables6['b2'], Variables6['b6']),
UnaryOp(Variables6['b6'], Variables6['b1'], 1, 2, Interval()),
UnaryOp(Variables6['b6'], Variables6['b0'], 1, -3, Interval()),
UnaryOp(Variables6['b0'], Variables6['b4'], 1, 0, Interval(0, 5)),
UnaryOp(Variables6['b1'], Variables6['b3'], 1, 0, Interval(0, 5))
]
UseMap6 = {
'b0': [Operations6[4]],
'b1': [Operations6[5]],
'b2': [Operations6[1]],
'b3': [Operations6[1]],
'b4': [Operations6[0]],
'b5': [Operations6[0]],
'b6': [Operations6[2], Operations6[3]]
}
# Graph 16:
Variables16 = {
'i0': VarNode('i0', Interval(5, 5)),
'i1': VarNode('i1', BottomInterval()),
'i2': VarNode('i2', BottomInterval()),
'i3': VarNode('i3', BottomInterval()),
'i4': VarNode('i4', BottomInterval()),
'i5': VarNode('i5', BottomInterval()),
'i6': VarNode('i6', BottomInterval()),
'i7': VarNode('i7', BottomInterval())
}
Operations16 = [
PhiOp(Variables16['i0'], Variables16['i4'], Variables16['i5']),
PhiOp(Variables16['i5'], Variables16['i6'], Variables16['i7']),
UnaryOp(Variables16['i7'], Variables16['i1'], 1, 0, Interval(2, 11)),
UnaryOp(Variables16['i1'], Variables16['i2'], 1, 0, Interval(1, 10)),
UnaryOp(Variables16['i2'], Variables16['i3'], 1, 0, Interval(3, 12)),
UnaryOp(Variables16['i3'], Variables16['i4'], 1, -1),
UnaryOp(Variables16['i3'], Variables16['i6'], 1, 1)
]
UseMap16 = {
'i0': [Operations16[0]],
'i1': [Operations16[3]],
'i2': [Operations16[4]],
'i3': [Operations16[5], Operations16[6]],
'i4': [Operations16[0]],
'i5': [Operations16[1]],
'i6': [Operations16[1]],
'i7': [Operations16[2]]
}
# Graph 17:
Variables17 = {
'i0': VarNode('i0', Interval(0, 0)),
'i1': VarNode('i1', BottomInterval()),
'i2': VarNode('i2', BottomInterval()),
'i3': VarNode('i3', BottomInterval()),
'i4': VarNode('i4', BottomInterval())
}
Operations17 = [
PhiOp(Variables17['i0'], Variables17['i4'], Variables17['i1']),
UnaryOp(Variables17['i1'], Variables17['i2'], 1, 0, Interval('-', 1000)),
UnaryOp(Variables17['i2'], Variables17['i3'], 1, 0, Interval('-', 8)),
UnaryOp(Variables17['i3'], Variables17['i4'], 1, 1)
]
UseMap17 = {
'i0': [Operations17[0]],
'i1': [Operations17[1]],
'i2': [Operations17[2]],
'i3': [Operations17[3],],
'i4': [Operations17[0]]
}
def toDot(Title, Variables, Operations):
"""Print the edges in dot format."""
print 'digraph "' + Title + '" {'
for v, k in Variables.iteritems():
print " ", v, "[label=\"", str(k), "\"]"
for op in Operations:
print op.toDotStr()
print '}'
def growth_meet(op):
"""This is the meet operator of the growth analysis. The growth analysis
will change the bounds of each variable, if necessary. Initially, each
variable is bound to either the undefined interval, e.g. [., .], or to
a constant interval, e.g., [3, 15]. After this analysis runs, there will
be no undefined interval. Each variable will be either bound to a
constant interval, or to [-, c], or to [c, +], or to [-, +]."""
old_int = op.sink.interval
new_int = op.eval()
if isinstance(old_int, BottomInterval):
op.sink.interval = new_int
elif lt(new_int.l, old_int.l) and gt(new_int.u, old_int.u):
op.sink.interval = Interval()
elif lt(new_int.l, old_int.l):
op.sink.interval = Interval('-', old_int.u)
elif gt(new_int.u, old_int.u):
op.sink.interval = Interval(old_int.l, '+')
return old_int != op.sink.interval
def crop_meet(op):
"""This is the meet operator of the cropping analysis. Whereas the growth
analysis expands the bounds of each variable, regardless of intersections
	in the constraint graph, the cropping analysis shrinks these bounds back
to ranges that respect the intersections. Notice that we need to have
a reference to the original state of the variable, e.g., 0, +, - and ?.
We cannot store this information in the interval itself, because this
interval is likely to change from, say, +, to a constant."""
new_int = op.eval()
print "Processing ", str(op)
has_changed = False
if (op.sink.state == '-' or op.sink.state == '?') and gt(new_int.l, op.sink.interval.l):
op.sink.interval.l = new_int.l
has_changed = True
if (op.sink.state == '+' or op.sink.state == '?') and lt(new_int.u, op.sink.interval.u):
op.sink.interval.u = new_int.u
has_changed = True
print " - after evaluation: ", str(op.sink.interval)
return has_changed
def iterate_till_fix_point(use_map, active_vars, meet):
"""This method finds the abstract state of each variable. If we pass to it
the growth_meet, then we will have the growth analysis. If we pass the
crop_meet, then we will have the cropping analysis."""
if len(active_vars) > 0:
next_variable = active_vars.pop()
use_list = use_map[next_variable]
for op in use_list:
if meet(op):
active_vars.add(op.sink.name)
iterate_till_fix_point(use_map, active_vars, meet)
def store_abstract_states(variables):
"""This method stores the abstract state of each variable. The possible
states are '0', '+', '-' and '?'. This method must be called before we
run the cropping analysis, because these states will guide the meet
operator when cropping ranges."""
for var in variables:
if var.interval.l == '-' and var.interval.u == '+':
var.state = '?'
elif var.interval.l == '-':
var.state = '-'
elif var.interval.u == '+':
var.state = '+'
else:
var.state = '0'
def int_op(Variables, UseMap):
"""This method finds which operations contain non-trivial intersection.
The variables used in these operations will be the starting point of the
cropping analysis, and shall be returned by this method."""
non_trivial_set = set()
for var_name in Variables:
for op in UseMap[var_name]:
if isinstance(op, UnaryOp) and (op.i.u != '+' or op.i.l != '-'):
non_trivial_set.add(var_name)
break
return non_trivial_set
def findIntervals(Variables, Operations, UseMap, entryPoints):
"""Finds the intervals of a SCC."""
toDot("First graph", Variables, Operations)
iterate_till_fix_point(UseMap, set(entryPoints), growth_meet)
toDot("After Growth Analysis", Variables, Operations)
for op in Operations:
op.fixIntersects()
toDot("After fixing the intersections", Variables, Operations)
store_abstract_states(Variables.values())
iterate_till_fix_point(UseMap, int_op(Variables, UseMap), crop_meet)
toDot("After cropping analysis", Variables, Operations)
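# In summary, findIntervals() runs in three phases: (1) the growth analysis
# widens each range towards -inf/+inf while ignoring the intersections, (2)
# fixIntersects() replaces symbolic bounds such as ub(j) with the concrete
# values just computed, and (3) the cropping analysis shrinks the widened
# ranges back using the now-constant intersections.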
#findIntervals(Variables0, Operations0, UseMap0, ['k', 'k0'])
#findIntervals(Variables1, Operations1, UseMap1, ['k0', 'k1', 'k3'])
#findIntervals(Variables2, Operations2, UseMap2, ['i', 'j'])
#findIntervals(Variables3, Operations3, UseMap3, ['b'])
#findIntervals(Variables4, Operations4, UseMap4, ['a', 'b'])
#findIntervals(Variables5, Operations5, UseMap5, ['i0'])
#findIntervals(Variables6, Operations6, UseMap6, ['b5'])
#findIntervals(Variables16, Operations16, UseMap16, ['i0'])
findIntervals(Variables17, Operations17, UseMap17, ['i0'])
| gpl-2.0 | 8,896,778,648,553,874,000 | 32.611993 | 92 | 0.5913 | false |
redhat-openstack/cinder | cinder/version.py | 2 | 1077 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
CINDER_VENDOR = "OpenStack Foundation"
CINDER_PRODUCT = "OpenStack Cinder"
CINDER_PACKAGE = None # OS distro package version suffix
loaded = False
class VersionInfo:
version = "REDHATCINDERVERSION"
release = "REDHATCINDERRELEASE"
def release_string(self):
return '%s-%s' % (self.version, self.release)
def version_string(self):
return self.version
version_info = VersionInfo()
version_string = version_info.version_string
| apache-2.0 | -1,979,264,150,002,856,000 | 31.636364 | 78 | 0.721448 | false |
silveregg/moto | tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py | 2 | 7273 | from __future__ import unicode_literals
template = {
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.",
"Parameters": {
"DBName": {
"Default": "MyDatabase",
"Description" : "The database name",
"Type": "String",
"MinLength": "1",
"MaxLength": "64",
"AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters."
},
"DBInstanceIdentifier": {
"Type": "String"
},
"DBUser": {
"NoEcho": "true",
"Description" : "The database admin account username",
"Type": "String",
"MinLength": "1",
"MaxLength": "16",
"AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters."
},
"DBPassword": {
"NoEcho": "true",
"Description" : "The database admin account password",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern" : "[a-zA-Z0-9]+",
"ConstraintDescription" : "must contain only alphanumeric characters."
},
"DBAllocatedStorage": {
"Default": "5",
"Description" : "The size of the database (Gb)",
"Type": "Number",
"MinValue": "5",
"MaxValue": "1024",
"ConstraintDescription" : "must be between 5 and 1024Gb."
},
"DBInstanceClass": {
"Description" : "The database instance type",
"Type": "String",
"Default": "db.m1.small",
"AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"]
,
"ConstraintDescription" : "must select a valid database instance type."
},
"EC2SecurityGroup": {
"Description" : "The EC2 security group that contains instances that need access to the database",
"Default": "default",
"Type": "String",
"AllowedPattern" : "[a-zA-Z0-9\\-]+",
"ConstraintDescription" : "must be a valid security group name."
},
"MultiAZ" : {
"Description" : "Multi-AZ master database",
"Type" : "String",
"Default" : "false",
"AllowedValues" : [ "true", "false" ],
"ConstraintDescription" : "must be true or false."
}
},
"Conditions" : {
"Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]},
{"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]},
"Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]}
},
"Resources" : {
"DBParameterGroup": {
"Type": "AWS::RDS::DBParameterGroup",
"Properties" : {
"Description": "DB Parameter Goup",
"Family" : "MySQL5.1",
"Parameters": {
"BACKLOG_QUEUE_LIMIT": "2048"
}
}
},
"DBEC2SecurityGroup": {
"Type": "AWS::EC2::SecurityGroup",
"Condition" : "Is-EC2-VPC",
"Properties" : {
"GroupDescription": "Open database for access",
"SecurityGroupIngress" : [{
"IpProtocol" : "tcp",
"FromPort" : "3306",
"ToPort" : "3306",
"SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" }
}]
}
},
"DBSecurityGroup": {
"Type": "AWS::RDS::DBSecurityGroup",
"Condition" : "Is-EC2-Classic",
"Properties": {
"DBSecurityGroupIngress": [{
"EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" }
}],
"GroupDescription": "database access"
}
},
"my_vpc": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : "10.0.0.0/16",
}
},
"EC2Subnet": {
"Type" : "AWS::EC2::Subnet",
"Condition" : "Is-EC2-VPC",
"Properties" : {
"AvailabilityZone" : "eu-central-1a",
"CidrBlock" : "10.0.1.0/24",
"VpcId" : { "Ref" : "my_vpc" }
}
},
"DBSubnet": {
"Type": "AWS::RDS::DBSubnetGroup",
"Condition" : "Is-EC2-VPC",
"Properties": {
"DBSubnetGroupDescription": "my db subnet group",
"SubnetIds" : [ { "Ref": "EC2Subnet" } ],
}
},
"MasterDB" : {
"Type" : "AWS::RDS::DBInstance",
"Properties" : {
"DBInstanceIdentifier": { "Ref": "DBInstanceIdentifier" },
"DBName" : { "Ref" : "DBName" },
"AllocatedStorage" : { "Ref" : "DBAllocatedStorage" },
"DBInstanceClass" : { "Ref" : "DBInstanceClass" },
"Engine" : "MySQL",
"DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]},
"MasterUsername" : { "Ref" : "DBUser" },
"MasterUserPassword" : { "Ref" : "DBPassword" },
"MultiAZ" : { "Ref" : "MultiAZ" },
"Tags" : [{ "Key" : "Name", "Value" : "Master Database" }],
"VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]},
"DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]}
},
"DeletionPolicy" : "Snapshot"
},
"ReplicaDB" : {
"Type" : "AWS::RDS::DBInstance",
"Properties" : {
"SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" },
"DBInstanceClass" : { "Ref" : "DBInstanceClass" },
"Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }]
}
}
},
"Outputs" : {
"EC2Platform" : {
"Description" : "Platform in which this stack is deployed",
"Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]}
},
"MasterJDBCConnectionString": {
"Description" : "JDBC connection string for the master database",
"Value" : { "Fn::Join": [ "", [ "jdbc:mysql://",
{ "Fn::GetAtt": [ "MasterDB", "Endpoint.Address" ] },
":",
{ "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] },
"/",
{ "Ref": "DBName" }]]}
},
"ReplicaJDBCConnectionString": {
"Description" : "JDBC connection string for the replica database",
"Value" : { "Fn::Join": [ "", [ "jdbc:mysql://",
{ "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] },
":",
{ "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Port" ] },
"/",
{ "Ref": "DBName" }]]}
}
}
}
| apache-2.0 | 892,798,649,775,853,300 | 35.18408 | 386 | 0.510381 | false |
sunshinelover/chanlun | vn.trader/ctpGateway/ctpGateway.py | 1 | 55491 | # encoding: UTF-8
'''
Gateway access for vn.ctp.
Since at this stage most of the ExchangeID fields returned by CTP are empty,
vtSymbol simply uses symbol.
'''
import os
import json
from copy import copy
from vnctpmd import MdApi
from vnctptd import TdApi
from ctpDataType import *
from vtGateway import *
# Mapping dictionaries between VT constants and CTP constants
# price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = defineDict["THOST_FTDC_OPT_LimitPrice"]
priceTypeMap[PRICETYPE_MARKETPRICE] = defineDict["THOST_FTDC_OPT_AnyPrice"]
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# direction type mapping
directionMap = {}
directionMap[DIRECTION_LONG] = defineDict['THOST_FTDC_D_Buy']
directionMap[DIRECTION_SHORT] = defineDict['THOST_FTDC_D_Sell']
directionMapReverse = {v: k for k, v in directionMap.items()}
# offset (open/close) type mapping
offsetMap = {}
offsetMap[OFFSET_OPEN] = defineDict['THOST_FTDC_OF_Open']
offsetMap[OFFSET_CLOSE] = defineDict['THOST_FTDC_OF_Close']
offsetMap[OFFSET_CLOSETODAY] = defineDict['THOST_FTDC_OF_CloseToday']
offsetMap[OFFSET_CLOSEYESTERDAY] = defineDict['THOST_FTDC_OF_CloseYesterday']
offsetMapReverse = {v:k for k,v in offsetMap.items()}
# exchange type mapping
exchangeMap = {}
exchangeMap[EXCHANGE_CFFEX] = 'CFFEX'
exchangeMap[EXCHANGE_SHFE] = 'SHFE'
exchangeMap[EXCHANGE_CZCE] = 'CZCE'
exchangeMap[EXCHANGE_DCE] = 'DCE'
exchangeMap[EXCHANGE_SSE] = 'SSE'
exchangeMap[EXCHANGE_UNKNOWN] = ''
EXCHANGE_INE = 'INE'    # Shanghai International Energy Exchange
exchangeMap[EXCHANGE_INE] = 'INE'
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# position direction mapping
posiDirectionMap = {}
posiDirectionMap[DIRECTION_NET] = defineDict["THOST_FTDC_PD_Net"]
posiDirectionMap[DIRECTION_LONG] = defineDict["THOST_FTDC_PD_Long"]
posiDirectionMap[DIRECTION_SHORT] = defineDict["THOST_FTDC_PD_Short"]
posiDirectionMapReverse = {v:k for k,v in posiDirectionMap.items()}
# product class mapping
productClassMap = {}
productClassMap[PRODUCT_FUTURES] = defineDict["THOST_FTDC_PC_Futures"]
productClassMap[PRODUCT_OPTION] = defineDict["THOST_FTDC_PC_Options"]
productClassMap[PRODUCT_COMBINATION] = defineDict["THOST_FTDC_PC_Combination"]
productClassMapReverse = {v:k for k,v in productClassMap.items()}
########################################################################
class CtpGateway(VtGateway):
    """CTP gateway interface"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='CTP'):
"""Constructor"""
super(CtpGateway, self).__init__(eventEngine, gatewayName)
        self.mdApi = CtpMdApi(self, eventEngine)     # market data API
        self.tdApi = CtpTdApi(self)             # trading API
        self.mdConnected = False            # market data API connection status; True once login completes
        self.tdConnected = False            # trading API connection status
        self.qryEnabled = False         # whether to start the recurring queries
#----------------------------------------------------------------------
    def connect(self):
        """Connect"""
        # load the json configuration file
fileName = self.gatewayName + '_connect.json'
fileName = os.getcwd() + '/ctpGateway/' + fileName
try:
f = file(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Failed to read the connection configuration, please check'
self.onLog(log)
return
        # parse the json file
setting = json.load(f)
try:
userID = str(setting['userID'])
password = str(setting['password'])
brokerID = str(setting['brokerID'])
tdAddress = str(setting['tdAddress'])
mdAddress = str(setting['mdAddress'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Connection configuration is missing fields, please check'
self.onLog(log)
return
        # connect the market data and trading APIs
self.mdApi.connect(userID, password, brokerID, mdAddress)
self.tdApi.connect(userID, password, brokerID, tdAddress)
        # initialize and start the recurring queries
self.initQuery()
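    # For reference, connect() above expects ctpGateway/CTP_connect.json to look
    # roughly like this (all values are placeholders, not real credentials or
    # endpoints):
    # {
    #     "userID": "your_account",
    #     "password": "your_password",
    #     "brokerID": "your_broker_id",
    #     "tdAddress": "tcp://td.server.address:port",
    #     "mdAddress": "tcp://md.server.address:port"
    # }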
#----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data"""
self.mdApi.subscribe(subscribeReq)
#----------------------------------------------------------------------
    def unsubscribe(self, unsubscribeReq):
        """Unsubscribe from market data"""
self.mdApi.unsubscribe(unsubscribeReq)
#----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order"""
return self.tdApi.sendOrder(orderReq)
#----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel an order"""
self.tdApi.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
    def qryAccount(self):
        """Query account funds"""
self.tdApi.qryAccount()
#----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions"""
self.tdApi.qryPosition()
#----------------------------------------------------------------------
    def close(self):
        """Close"""
if self.mdConnected:
self.mdApi.close()
if self.tdConnected:
self.tdApi.close()
#----------------------------------------------------------------------
    def initQuery(self):
        """Initialize the recurring queries"""
if self.qryEnabled:
            # list of query functions to cycle through
self.qryFunctionList = [self.qryAccount, self.qryPosition]
            self.qryCount = 0           # countdown before a query is triggered
            self.qryTrigger = 2         # trigger point for firing a query
            self.qryNextFunction = 0    # index of the next query function to run
self.startQuery()
#----------------------------------------------------------------------
    def query(self, event):
        """Query function registered with the event engine"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
            # reset the countdown
self.qryCount = 0
            # execute the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
            # compute the index of the next query function; reset to 0 when past the end of the list
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
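    # Note: with qryTrigger = 2, query() only fires a request on every third
    # EVENT_TIMER event, alternating between qryAccount and qryPosition; the
    # timer period itself is determined by the event engine, not here.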
#----------------------------------------------------------------------
    def startQuery(self):
        """Start the recurring queries"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
    def setQryEnabled(self, qryEnabled):
        """Set whether the recurring queries should be started"""
self.qryEnabled = qryEnabled
########################################################################
class CtpMdApi(MdApi):
    """CTP market data API implementation"""
#----------------------------------------------------------------------
def __init__(self, gateway, eventEngine):
"""Constructor"""
super(CtpMdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.eventEngine = eventEngine
        self.reqID = EMPTY_INT              # request number
        self.connectionStatus = False       # connection status
        self.loginStatus = False            # login status
        self.subscribedSymbols = set()      # subscribed contract symbols
        self.userID = EMPTY_STRING          # account id
        self.password = EMPTY_STRING        # password
        self.brokerID = EMPTY_STRING        # broker id
        self.address = EMPTY_STRING         # server address
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
"""心跳报警"""
# 因为API的心跳报警比较常被触发,且与API工作关系不大,因此选择忽略
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登录完成'
self.gateway.onLog(log)
            # Re-subscribe to previously subscribed contracts
for subscribeReq in self.subscribedSymbols:
self.subscribe(subscribeReq)
        # Otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登出完成'
self.gateway.onLog(log)
        # Otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
# 通常不在乎订阅错误,选择忽略
pass
#----------------------------------------------------------------------
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
# 同上
pass
#----------------------------------------------------------------------
def onRtnDepthMarketData(self, data):
"""行情推送"""
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = data['InstrumentID']
tick.exchange = exchangeMapReverse.get(data['ExchangeID'], u'未知')
tick.vtSymbol = tick.symbol #'.'.join([tick.symbol, EXCHANGE_UNKNOWN])
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
tick.date = data['TradingDay']
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.preClosePrice = data['PreClosePrice']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
        # CTP provides only one level of market depth
tick.bidPrice1 = data['BidPrice1']
tick.bidVolume1 = data['BidVolume1']
tick.askPrice1 = data['AskPrice1']
tick.askVolume1 = data['AskVolume1']
self.gateway.onTick(tick)
        # Generic market data event
event = Event(type_=EVENT_MARKETDATA)
event.dict_['data'] = data
self.eventEngine.put(event)
#----------------------------------------------------------------------
def onRspSubForQuoteRsp(self, data, error, n, last):
"""订阅期权询价"""
pass
#----------------------------------------------------------------------
def onRspUnSubForQuoteRsp(self, data, error, n, last):
"""退订期权询价"""
pass
#----------------------------------------------------------------------
def onRtnForQuoteRsp(self, data):
"""期权询价推送"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
            # Create the API object on the C++ side; the argument is the folder path used to store .con files
path = os.getcwd() + '/temp/' + self.gatewayName + '/'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcMdApi(path)
            # Register the front server address
self.registerFront(self.address)
            # Initialize the connection; onFrontConnected is called on success
self.init()
        # If already connected but not yet logged in, log in now
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
if self.loginStatus:
self.subscribeMarketData(str(subscribeReq.symbol))
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
    def unsubscribe(self, unsubscribeReq):
        """Unsubscribe from a contract"""
        if self.loginStatus:
            self.unSubscribeMarketData(str(unsubscribeReq.symbol))
#----------------------------------------------------------------------
def login(self):
"""登录"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class CtpTdApi(TdApi):
"""CTP交易API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(CtpTdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT              # request sequence number
        self.orderRef = EMPTY_INT           # order reference number
        self.connectionStatus = False       # connection status
        self.loginStatus = False            # login status
        self.userID = EMPTY_STRING          # account ID
        self.password = EMPTY_STRING        # password
        self.brokerID = EMPTY_STRING        # broker ID
        self.address = EMPTY_STRING         # server address
        self.frontID = EMPTY_INT            # front machine ID
        self.sessionID = EMPTY_INT          # session ID
        self.posBufferDict = {}             # cache of position data buffers
        self.symbolExchangeDict = {}        # mapping from contract symbol to exchange
        self.symbolSizeDict = {}            # mapping from contract symbol to contract size
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspAuthenticate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.tdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登录完成'
self.gateway.onLog(log)
            # Confirm settlement information
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqID += 1
self.reqSettlementInfoConfirm(req, self.reqID)
        # Otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登出完成'
self.gateway.onLog(log)
        # Otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspParkedOrderInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspQueryMaxOrderVolume(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'结算信息确认完成'
self.gateway.onLog(log)
        # Query the available contracts
self.reqID += 1
self.reqQryInstrument({}, self.reqID)
#----------------------------------------------------------------------
def onRspRemoveParkedOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspRemoveParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspForQuoteInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQuoteInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQuoteAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspLockInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspCombActionInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
# 获取缓存字典中的持仓缓存,若无则创建并初始化
positionName = '.'.join([data['InstrumentID'], data['PosiDirection']])
if positionName in self.posBufferDict:
posBuffer = self.posBufferDict[positionName]
else:
posBuffer = PositionBuffer(data, self.gatewayName)
self.posBufferDict[positionName] = posBuffer
        # Update the position buffer and obtain the VT position object to push
exchange = self.symbolExchangeDict.get(data['InstrumentID'], EXCHANGE_UNKNOWN)
size = self.symbolSizeDict.get(data['InstrumentID'], 1)
if exchange == EXCHANGE_SHFE:
pos = posBuffer.updateShfeBuffer(data, size)
else:
pos = posBuffer.updateBuffer(data, size)
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last):
"""资金账户查询回报"""
account = VtAccountData()
account.gatewayName = self.gatewayName
        # Account ID
account.accountID = data['AccountID']
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
        # Numeric fields
account.preBalance = data['PreBalance']
account.available = data['Available']
account.commission = data['Commission']
account.margin = data['CurrMargin']
account.closeProfit = data['CloseProfit']
account.positionProfit = data['PositionProfit']
        # It is not certain that this balance matches the account balance shown in the Kuaiqi (快期) client; needs testing
account.balance = (data['PreBalance'] - data['PreCredit'] - data['PreMortgage'] +
data['Mortgage'] - data['Withdraw'] + data['Deposit'] +
data['CloseProfit'] + data['PositionProfit'] + data['CashIn'] -
data['Commission'])
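        # In other words: previous balance adjusted for credit/mortgage changes, deposits and
        # withdrawals, realized and floating profit and cash-in, minus commission (dynamic equity).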
        # Push the data
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentMarginRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentCommissionRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryProduct(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""合约查询回报"""
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data['InstrumentID']
contract.exchange = exchangeMapReverse[data['ExchangeID']]
contract.vtSymbol = contract.symbol #'.'.join([contract.symbol, contract.exchange])
contract.name = data['InstrumentName'].decode('GBK')
        # Contract numeric fields
contract.size = data['VolumeMultiple']
contract.priceTick = data['PriceTick']
contract.strikePrice = data['StrikePrice']
contract.underlyingSymbol = data['UnderlyingInstrID']
contract.productClass = productClassMapReverse.get(data['ProductClass'], PRODUCT_UNKNOWN)
        # Option type
if data['OptionsType'] == '1':
contract.optionType = OPTION_CALL
elif data['OptionsType'] == '2':
contract.optionType = OPTION_PUT
        # Cache the mapping between contract symbol and exchange
self.symbolExchangeDict[contract.symbol] = contract.exchange
self.symbolSizeDict[contract.symbol] = contract.size
        # Push the data
self.gateway.onContract(contract)
if last:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易合约信息获取完成'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onRspQryDepthMarketData(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQrySettlementInfo(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTransferBank(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPositionDetail(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryNotice(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQrySettlementInfoConfirm(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPositionCombineDetail(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryEWarrantOffset(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchangeMarginRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchangeRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQrySecAgentACIDMap(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryProductExchRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryProductGroup(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryOptionInstrTradeCost(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryOptionInstrCommRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExecOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryForQuote(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryQuote(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryLock(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryLockPosition(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorLevel(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExecFreeze(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryCombInstrumentGuard(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryCombAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTransferSerial(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryAccountregister(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnOrder(self, data):
"""报单回报"""
# 更新最大报单编号
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
        # Create the order data object
order = VtOrderData()
order.gatewayName = self.gatewayName
        # Save the symbol and order reference
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse[data['ExchangeID']]
order.vtSymbol = order.symbol #'.'.join([order.symbol, order.exchange])
order.orderID = data['OrderRef']
        # Direction
if data['Direction'] == '0':
order.direction = DIRECTION_LONG
elif data['Direction'] == '1':
order.direction = DIRECTION_SHORT
else:
order.direction = DIRECTION_UNKNOWN
        # Offset (open/close)
if data['CombOffsetFlag'] == '0':
order.offset = OFFSET_OPEN
elif data['CombOffsetFlag'] == '1':
order.offset = OFFSET_CLOSE
else:
order.offset = OFFSET_UNKNOWN
        # Status
if data['OrderStatus'] == '0':
order.status = STATUS_ALLTRADED
elif data['OrderStatus'] == '1':
order.status = STATUS_PARTTRADED
elif data['OrderStatus'] == '3':
order.status = STATUS_NOTTRADED
elif data['OrderStatus'] == '5':
order.status = STATUS_CANCELLED
else:
order.status = STATUS_UNKNOWN
        # Price, volume and other numeric fields
order.price = data['LimitPrice']
order.totalVolume = data['VolumeTotalOriginal']
order.tradedVolume = data['VolumeTraded']
order.orderTime = data['InsertTime']
order.cancelTime = data['CancelTime']
order.frontID = data['FrontID']
order.sessionID = data['SessionID']
        # Strictly speaking, CTP order IDs are only unique for the combination of frontID, sessionID and orderID.
        # This gateway, however, relies on OrderRef increasing monotonically to avoid duplicates.
        # The only way an OrderRef could repeat is logging in from several places and sending orders at almost the same time.
        # Given VtTrader's intended usage, that scenario is not considered a problem.
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
        # Push the data
self.gateway.onOrder(order)
#----------------------------------------------------------------------
def onRtnTrade(self, data):
"""成交回报"""
# 创建报单数据对象
trade = VtTradeData()
trade.gatewayName = self.gatewayName
        # Save the symbol and order reference
trade.symbol = data['InstrumentID']
trade.exchange = exchangeMapReverse[data['ExchangeID']]
trade.vtSymbol = trade.symbol #'.'.join([trade.symbol, trade.exchange])
trade.tradeID = data['TradeID']
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = data['OrderRef']
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
        # Direction
trade.direction = directionMapReverse.get(data['Direction'], '')
        # Offset (open/close)
trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')
        # Price, volume and other numeric fields
trade.price = data['Price']
trade.volume = data['Volume']
trade.tradeTime = data['TradeTime']
        # Push the data
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnInstrumentStatus(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnTradingNotice(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnErrorConditionalOrder(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnExecOrder(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnExecOrderInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnExecOrderAction(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnForQuoteInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRtnQuote(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnQuoteInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnQuoteAction(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRtnForQuoteRsp(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnCFMMCTradingAccountToken(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnLock(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnLockInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRtnCombAction(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnCombActionInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRspQryContractBank(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryParkedOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingNotice(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryBrokerTradingParams(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryBrokerTradingAlgos(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQueryCFMMCTradingAccountToken(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromBankToFutureByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromFutureToBankByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromBankToFutureByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromFutureToBankByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromBankToFutureByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromFutureToBankByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromBankToFutureByFutureManual(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromFutureToBankByFutureManual(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnQueryBankBalanceByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnBankToFutureByFuture(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnFutureToBankByFuture(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnRepealBankToFutureByFutureManual(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnRepealFutureToBankByFutureManual(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnQueryBankBalanceByFuture(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromBankToFutureByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromFutureToBankByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRspFromBankToFutureByFuture(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspFromFutureToBankByFuture(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQueryBankAccountMoneyByFuture(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRtnOpenAccountByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnCancelAccountByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnChangeAccountByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
            # Create the API object on the C++ side; the argument is the folder path used to store .con files
path = os.getcwd() + '/temp/' + self.gatewayName + '/'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcTraderApi(path)
            # Register the front server address
self.registerFront(self.address)
            # Initialize the connection; onFrontConnected is called on success
self.init()
        # If already connected but not yet logged in, log in now
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户"""
self.reqID += 1
self.reqQryTradingAccount({}, self.reqID)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryInvestorPosition(req, self.reqID)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
self.reqID += 1
self.orderRef += 1
req = {}
req['InstrumentID'] = orderReq.symbol
req['LimitPrice'] = orderReq.price
req['VolumeTotalOriginal'] = orderReq.volume
        # The mappings below return an empty string when the requested type is not supported by this gateway
req['OrderPriceType'] = priceTypeMap.get(orderReq.priceType, '')
req['Direction'] = directionMap.get(orderReq.direction, '')
req['CombOffsetFlag'] = offsetMap.get(orderReq.offset, '')
req['OrderRef'] = str(self.orderRef)
req['InvestorID'] = self.userID
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
        req['CombHedgeFlag'] = defineDict['THOST_FTDC_HF_Speculation']          # speculative order
        req['ContingentCondition'] = defineDict['THOST_FTDC_CC_Immediately']    # send immediately
        req['ForceCloseReason'] = defineDict['THOST_FTDC_FCC_NotForceClose']    # not a forced close
        req['IsAutoSuspend'] = 0                                                 # do not auto-suspend
        req['TimeCondition'] = defineDict['THOST_FTDC_TC_GFD']                   # good for the day
        req['VolumeCondition'] = defineDict['THOST_FTDC_VC_AV']                  # any volume
        req['MinVolume'] = 1                                                     # minimum fill volume of 1
        # Handle FAK and FOK price types
if orderReq.priceType == PRICETYPE_FAK:
req['OrderPriceType'] = defineDict["THOST_FTDC_OPT_LimitPrice"]
req['TimeCondition'] = defineDict['THOST_FTDC_TC_IOC']
req['VolumeCondition'] = defineDict['THOST_FTDC_VC_AV']
if orderReq.priceType == PRICETYPE_FOK:
req['OrderPriceType'] = defineDict["THOST_FTDC_OPT_LimitPrice"]
req['TimeCondition'] = defineDict['THOST_FTDC_TC_IOC']
req['VolumeCondition'] = defineDict['THOST_FTDC_VC_CV']
self.reqOrderInsert(req, self.reqID)
        # Return the order ID (a string) so that algorithms can manage the order dynamically
vtOrderID = '.'.join([self.gatewayName, str(self.orderRef)])
return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.reqID += 1
req = {}
req['InstrumentID'] = cancelOrderReq.symbol
req['ExchangeID'] = cancelOrderReq.exchange
req['OrderRef'] = cancelOrderReq.orderID
req['FrontID'] = cancelOrderReq.frontID
req['SessionID'] = cancelOrderReq.sessionID
req['ActionFlag'] = defineDict['THOST_FTDC_AF_Delete']
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqOrderAction(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class PositionBuffer(object):
"""用来缓存持仓的数据,处理上期所的数据返回分今昨的问题"""
#----------------------------------------------------------------------
def __init__(self, data, gatewayName):
"""Constructor"""
self.symbol = data['InstrumentID']
self.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
self.todayPosition = EMPTY_INT
self.ydPosition = EMPTY_INT
self.todayPositionCost = EMPTY_FLOAT
self.ydPositionCost = EMPTY_FLOAT
        # Create the position data object once up front and reuse it to reduce overhead
pos = VtPositionData()
pos.symbol = self.symbol
pos.vtSymbol = self.symbol
pos.gatewayName = gatewayName
pos.direction = self.direction
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
self.pos = pos
#----------------------------------------------------------------------
def updateShfeBuffer(self, data, size):
"""更新上期所缓存,返回更新后的持仓数据"""
# 昨仓和今仓的数据更新是分在两条记录里的,因此需要判断检查该条记录对应仓位
# 因为今仓字段TodayPosition可能变为0(被全部平仓),因此分辨今昨仓需要用YdPosition字段
if data['YdPosition']:
self.ydPosition = data['Position']
self.ydPositionCost = data['PositionCost']
else:
self.todayPosition = data['Position']
self.todayPositionCost = data['PositionCost']
        # The total position is the sum of yesterday and today positions
self.pos.position = self.todayPosition + self.ydPosition
self.pos.ydPosition = self.ydPosition
        # If any position remains, compute the average price as a cost-weighted average
if self.todayPosition or self.ydPosition:
self.pos.price = ((self.todayPositionCost + self.ydPositionCost)/
((self.todayPosition + self.ydPosition) * size))
        # Otherwise the price is 0
else:
self.pos.price = 0
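        # Illustrative example: 2 yesterday lots costing 600000 plus 1 today lot costing 310000,
        # with a contract size of 10, gives price = 910000 / (3 * 10) = 30333.33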
return copy(self.pos)
#----------------------------------------------------------------------
def updateBuffer(self, data, size):
"""更新其他交易所的缓存,返回更新后的持仓数据"""
# 其他交易所并不区分今昨,因此只关心总仓位,昨仓设为0
self.pos.position = data['Position']
self.pos.ydPosition = 0
if data['Position']:
self.pos.price = data['PositionCost'] / (data['Position'] * size)
else:
self.pos.price = 0
return copy(self.pos)
#----------------------------------------------------------------------
def test():
"""测试"""
from PyQt4 import QtCore
import sys
def print_log(event):
log = event.dict_['data']
print ':'.join([log.logTime, log.logContent])
app = QtCore.QCoreApplication(sys.argv)
eventEngine = EventEngine()
eventEngine.register(EVENT_LOG, print_log)
eventEngine.start()
gateway = CtpGateway(eventEngine)
gateway.connect()
sys.exit(app.exec_())
if __name__ == '__main__':
test() | mit | -9,124,491,041,075,271,000 | 33.939516 | 97 | 0.414241 | false |
dsm054/pandas | pandas/tests/tslibs/test_timezones.py | 6 | 2293 | # -*- coding: utf-8 -*-
from datetime import datetime
import pytest
import pytz
import dateutil.tz
from pandas._libs.tslibs import timezones, conversion
from pandas import Timestamp
@pytest.mark.parametrize('tz_name', list(pytz.common_timezones))
def test_cache_keys_are_distinct_for_pytz_vs_dateutil(tz_name):
if tz_name == 'UTC':
# skip utc as it's a special case in dateutil
return
tz_p = timezones.maybe_get_tz(tz_name)
tz_d = timezones.maybe_get_tz('dateutil/' + tz_name)
if tz_d is None:
# skip timezones that dateutil doesn't know about.
return
assert timezones._p_tz_cache_key(tz_p) != timezones._p_tz_cache_key(tz_d)
def test_tzlocal():
# GH#13583
ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal())
assert ts.tz == dateutil.tz.tzlocal()
assert "tz='tzlocal()')" in repr(ts)
tz = timezones.maybe_get_tz('tzlocal()')
assert tz == dateutil.tz.tzlocal()
# get offset using normal datetime for test
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = offset.total_seconds() * 1000000000
assert ts.value + offset == Timestamp('2011-01-01').value
@pytest.mark.parametrize('eastern, localize', [
(pytz.timezone('US/Eastern'), lambda tz, x: tz.localize(x)),
(dateutil.tz.gettz('US/Eastern'), lambda tz, x: x.replace(tzinfo=tz))])
def test_infer_tz(eastern, localize):
utc = pytz.utc
start_naive = datetime(2001, 1, 1)
end_naive = datetime(2009, 1, 1)
start = localize(eastern, start_naive)
end = localize(eastern, end_naive)
assert (timezones.infer_tzinfo(start, end) is
conversion.localize_pydatetime(start_naive, eastern).tzinfo)
assert (timezones.infer_tzinfo(start, None) is
conversion.localize_pydatetime(start_naive, eastern).tzinfo)
assert (timezones.infer_tzinfo(None, end) is
conversion.localize_pydatetime(end_naive, eastern).tzinfo)
start = utc.localize(start_naive)
end = utc.localize(end_naive)
assert timezones.infer_tzinfo(start, end) is utc
end = conversion.localize_pydatetime(end_naive, eastern)
with pytest.raises(Exception):
timezones.infer_tzinfo(start, end)
with pytest.raises(Exception):
timezones.infer_tzinfo(end, start)
| bsd-3-clause | 4,047,214,537,856,406,500 | 33.223881 | 77 | 0.676406 | false |
xfaxca/pymlkit | pymlkit/scripts/spellchecker.py | 1 | 2016 | from pymlkit.nlp.spellcheck import SpellChecker
TIME_LOOKUPS = True
def main():
"""
    Testing the spellchecker using a corpus based on a few prologue chapters from The Lord of the Rings.
:return:
"""
# Docs can be any set of test files to read in to use in the spellchecker
datadir = '../../data/lotr/'
docs = [
'concerning_hobbits.txt',
'concerning_pipeweed.txt',
'finding_of_the_ring_onwards.txt',
'ordering_of_the_shire.txt'
]
doctexts = []
for doc in docs:
doctexts.append(open(datadir + doc).read())
all_text = " ".join(doctexts)
# Instantiate a SpellChecker and parse the text.
sc = SpellChecker(all_text, autoparse=True) # alternatively, can call `parse_words` and `count_Words` methods separately.
print("{} total words found. {} Unique words".format(sc.n_words, sc.n_unique_words))
for word in ['hobbit', 'the', 'a', 'farm', 'random', 'history', 'stalk', 'asdfasdfasdfasdf', 'stare', 'book']:
print("Probability of {word}: ".format(word=word), str(round(sc.probability(word) * 100, 4)) + '%')
for mistake in ['hobit', 'pip', 'rign', 'stlak', 'shrie', 'ownard', 'teh', 'moer', 'hlep']:
print('Corrected "{}":'.format(mistake), sc.correction(mistake))
loop = True
while loop:
        word = input("Please enter a word to spell check (or 'quit' to exit): ").lower().strip()
if word in ['exit', 'quit']:
print("Goodbye.")
loop = False
elif word != '':
from time import time
t0 = time()
print("Probability of {word}: ".format(word=word), str(round(sc.probability(word) * 100, 4)) + '%')
if TIME_LOOKUPS:
print("Total time: {}ms".format(round((time() - t0) * 1000, 2)))
print('Best suggestions to correct "{}":'.format(word), sc.correction(word, n=5))
else:
print("Your input was empty. Please try again or type 'quit' or 'exit' to exit.")
if __name__ == '__main__':
main()
| gpl-3.0 | 3,557,752,126,215,787,000 | 38.529412 | 126 | 0.583829 | false |
1844144/django-blog-zinnia | zinnia/tests/utils.py | 6 | 1928 | """Utils for Zinnia's tests"""
try:
from urllib.parse import parse_qs
from urllib.parse import urlparse
from xmlrpc.client import Transport
except ImportError: # Python 2
from urlparse import parse_qs
from urlparse import urlparse
from xmlrpclib import Transport
from datetime import datetime as original_datetime
from django.utils import six
from django.conf import settings
from django.utils import timezone
from django.test.client import Client
class TestTransport(Transport):
"""
Handles connections to XML-RPC server through Django test client.
"""
def __init__(self, *args, **kwargs):
Transport.__init__(self, *args, **kwargs)
self.client = Client()
def request(self, host, handler, request_body, verbose=0):
self.verbose = verbose
response = self.client.post(handler,
request_body,
content_type="text/xml")
res = six.BytesIO(response.content)
setattr(res, 'getheader', lambda *args: '') # For Python >= 2.7
res.seek(0)
return self.parse_response(res)
def omniscient_datetime(*args):
"""
Generating a datetime aware or naive depending of USE_TZ.
"""
d = original_datetime(*args)
if settings.USE_TZ:
d = timezone.make_aware(d, timezone.utc)
return d
datetime = omniscient_datetime
def is_lib_available(library):
"""
Check if a Python library is available.
"""
try:
__import__(library)
return True
except ImportError:
return False
def urlEqual(url_1, url_2):
"""
Compare two URLs with query string where
ordering does not matter.
"""
parse_result_1 = urlparse(url_1)
parse_result_2 = urlparse(url_2)
return (parse_result_1[:4] == parse_result_2[:4] and
parse_qs(parse_result_1[5]) == parse_qs(parse_result_2[5]))
| bsd-3-clause | -5,645,971,459,084,099,000 | 26.542857 | 72 | 0.631224 | false |
KaranToor/MA450 | google-cloud-sdk/platform/bq/third_party/oauth2client/clientsecrets.py | 8 | 4681 | #!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading OAuth 2.0 client secret files.
A client_secrets.json file contains all the information needed to interact with
an OAuth 2.0 protected service.
"""
import json
import six
# Properties that make a client_secrets.json file valid.
TYPE_WEB = 'web'
TYPE_INSTALLED = 'installed'
VALID_CLIENT = {
TYPE_WEB: {
'required': [
'client_id',
'client_secret',
'redirect_uris',
'auth_uri',
'token_uri',
],
'string': [
'client_id',
'client_secret',
],
},
TYPE_INSTALLED: {
'required': [
'client_id',
'client_secret',
'redirect_uris',
'auth_uri',
'token_uri',
],
'string': [
'client_id',
'client_secret',
],
},
}
class Error(Exception):
"""Base error for this module."""
pass
class InvalidClientSecretsError(Error):
"""Format of ClientSecrets file is invalid."""
pass
def _validate_clientsecrets(obj):
_INVALID_FILE_FORMAT_MSG = (
'Invalid file format. See '
'https://developers.google.com/api-client-library/'
'python/guide/aaa_client_secrets')
if obj is None:
raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)
if len(obj) != 1:
raise InvalidClientSecretsError(
_INVALID_FILE_FORMAT_MSG + ' '
'Expected a JSON object with a single property for a "web" or '
'"installed" application')
client_type = tuple(obj)[0]
if client_type not in VALID_CLIENT:
raise InvalidClientSecretsError('Unknown client type: %s.' % (client_type,))
client_info = obj[client_type]
for prop_name in VALID_CLIENT[client_type]['required']:
if prop_name not in client_info:
raise InvalidClientSecretsError(
'Missing property "%s" in a client type of "%s".' % (prop_name,
client_type))
for prop_name in VALID_CLIENT[client_type]['string']:
if client_info[prop_name].startswith('[['):
raise InvalidClientSecretsError(
'Property "%s" is not configured.' % prop_name)
return client_type, client_info
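# For reference, a minimal "installed"-type client_secrets.json accepted by the validation
# above looks like the following (illustrative values only):
#
# {
#   "installed": {
#     "client_id": "1234.apps.googleusercontent.com",
#     "client_secret": "secret",
#     "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob"],
#     "auth_uri": "https://accounts.google.com/o/oauth2/auth",
#     "token_uri": "https://accounts.google.com/o/oauth2/token"
#   }
# }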
def load(fp):
obj = json.load(fp)
return _validate_clientsecrets(obj)
def loads(s):
obj = json.loads(s)
return _validate_clientsecrets(obj)
def _loadfile(filename):
try:
with open(filename, 'r') as fp:
obj = json.load(fp)
except IOError:
raise InvalidClientSecretsError('File not found: "%s"' % filename)
return _validate_clientsecrets(obj)
def loadfile(filename, cache=None):
"""Loading of client_secrets JSON file, optionally backed by a cache.
Typical cache storage would be App Engine memcache service,
but you can pass in any other cache client that implements
these methods:
* ``get(key, namespace=ns)``
* ``set(key, value, namespace=ns)``
Usage::
# without caching
client_type, client_info = loadfile('secrets.json')
# using App Engine memcache service
from google.appengine.api import memcache
client_type, client_info = loadfile('secrets.json', cache=memcache)
Args:
filename: string, Path to a client_secrets.json file on a filesystem.
cache: An optional cache service client that implements get() and set()
methods. If not specified, the file is always being loaded from
a filesystem.
Raises:
InvalidClientSecretsError: In case of a validation error or some
I/O failure. Can happen only on cache miss.
Returns:
(client_type, client_info) tuple, as _loadfile() normally would.
JSON contents is validated only during first load. Cache hits are not
validated.
"""
_SECRET_NAMESPACE = 'oauth2client:secrets#ns'
if not cache:
return _loadfile(filename)
obj = cache.get(filename, namespace=_SECRET_NAMESPACE)
if obj is None:
client_type, client_info = _loadfile(filename)
obj = {client_type: client_info}
cache.set(filename, obj, namespace=_SECRET_NAMESPACE)
return next(six.iteritems(obj))
| apache-2.0 | -1,867,588,988,385,992,000 | 27.542683 | 80 | 0.655202 | false |
deebuls/youbot_pykdl | scripts/display_urdf.py | 5 | 2367 | #!/usr/bin/python
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import argparse
from urdf_parser_py.urdf import URDF
def main():
parser = argparse.ArgumentParser(usage='Load an URDF file')
parser.add_argument('file', type=argparse.FileType('r'), nargs='?',
default=None, help='File to load. Use - for stdin')
parser.add_argument('-o', '--output', type=argparse.FileType('w'),
default=None, help='Dump file to XML')
args = parser.parse_args()
if args.file is None:
print 'FROM PARAM SERVER'
robot = URDF.from_parameter_server()
else:
print 'FROM STRING'
robot = URDF.from_xml_string(args.file.read())
print(robot)
if args.output is not None:
args.output.write(robot.to_xml_string())
if __name__ == "__main__":
main()
| bsd-3-clause | -1,025,759,241,111,887,600 | 40.526316 | 77 | 0.720744 | false |
robosafe/table | table_simulator/scripts/human_g.py | 2 | 2799 | #!/usr/bin/env python
"""
This script provides the functions that change the gaze (head location) and hand location of the human in Gazebo. It is imported by the human.py script.
Created by Dejanira Araiza Illan, July 2015.
"""
import rospy
from bert2_simulator.msg import *
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Vector3
from std_msgs.msg import Int8
def move_head(data):
angle = data.angle
if angle<=40.0:
setmodel = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
setmodel(ModelState('human_head',Pose(Point(0.4,0.6,-0.25),Quaternion(0.0,-0.15,-0.25,1.0)),Twist(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.0)),'world'))
else:
setmodel = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
setmodel(ModelState('human_head',Pose(Point(0.0,0.0,0.0),Quaternion(0.0,0.0,0.0,1.0)),Twist(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.0)),'world'))
def move_hand(data):
x = data.x
y = data.y
z = data.z
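    # The constant offsets below are presumably the fixed translation between the frame in which
    # hand locations are published and the Gazebo world frame of this particular scene.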
gx = x-1.15
gy = y+0.43
gz = z-0.73
setmodel2 = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
setmodel2(ModelState('human_hand',Pose(Point(gx,gy,gz),Quaternion(0.0,0.0,0.0,1.0)),Twist(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.0)),'world'))
def reset_head_hand():
setmodel = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
setmodel(ModelState('human_head',Pose(Point(0.0,0.0,0.0),Quaternion(0.0,0.0,0.0,1.0)),Twist(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.0)),'world'))
setmodel(ModelState('human_hand',Pose(Point(0.0,0.0,0.0),Quaternion(0.0,0.0,0.0,1.0)),Twist(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.0)),'world'))
def reset_head_hand2(data):
setmodel = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
setmodel(ModelState('human_head',Pose(Point(0.0,0.0,0.0),Quaternion(0.0,0.0,0.0,1.0)),Twist(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.0)),'world'))
setmodel(ModelState('human_hand',Pose(Point(0.0,0.0,0.0),Quaternion(0.0,0.0,0.0,1.0)),Twist(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.0)),'world'))
def main():
    rospy.init_node('human_gazebo_controller', anonymous=True)
    # Reset conditions in Gazebo
    reset_head_hand()
    # Register the subscribers once; their callbacks fire as messages arrive
    rospy.Subscriber("location", Location, move_hand)
    rospy.Subscriber("gaze", Gaze, move_head)
    rospy.Subscriber("reset_human", Int8, reset_head_hand2)
    rospy.spin()
#--------------------------------------------------------------------------------------
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException: #to stop the code when pressing Ctr+c
pass
| gpl-3.0 | -6,432,549,062,762,144,000 | 37.342466 | 152 | 0.676313 | false |
michaelzzh/LF_PredictionIO | examples/scala-parallel-similarproduct/filterbyyear/data/import_eventserver.py | 9 | 2629 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Import sample data for similar product engine
"""
import predictionio
import argparse
import random
SEED = 3
def import_events(client):
random.seed(SEED)
count = 0
print client.get_status()
print "Importing data..."
# generate 10 users, with user ids u1,u2,....,u10
user_ids = ["u%s" % i for i in range(1, 11)]
for user_id in user_ids:
print "Set user", user_id
client.create_event(
event="$set",
entity_type="user",
entity_id=user_id
)
count += 1
# generate 50 items, with item ids i1,i2,....,i50
# random assign 1 to 4 categories among c1-c6 to items
categories = ["c%s" % i for i in range(1, 7)]
item_ids = ["i%s" % i for i in range(1, 51)]
for item_id in item_ids:
print "Set item", item_id
client.create_event(
event="$set",
entity_type="item",
entity_id=item_id,
properties={
"categories" : random.sample(categories, random.randint(1, 4))
}
)
count += 1
# each user randomly viewed 10 items
for user_id in user_ids:
for viewed_item in random.sample(item_ids, 10):
print "User", user_id ,"views item", viewed_item
client.create_event(
event="view",
entity_type="user",
entity_id=user_id,
target_entity_type="item",
target_entity_id=viewed_item
)
count += 1
print "%s events are imported." % count
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Import sample data for similar product engine")
parser.add_argument('--access_key', default='invald_access_key')
parser.add_argument('--url', default="http://localhost:7070")
args = parser.parse_args()
print args
client = predictionio.EventClient(
access_key=args.access_key,
url=args.url,
threads=5,
qsize=500)
import_events(client)
| apache-2.0 | -7,096,045,361,157,417,000 | 28.211111 | 74 | 0.667554 | false |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.py | 13 | 33171 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
from contextlib import contextmanager
import warnings
try:
import cPickle as pickle
except ImportError:
import pickle
from ._multiprocessing_helpers import mp
from .format_stack import format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend)
from ._compat import _basestring
# Make sure that those two classes are part of the public joblib.parallel API
# so that 3rd party backend implementers can import them from here.
from ._parallel_backends import AutoBatchingMixin # noqa
from ._parallel_backends import ParallelBackendBase # noqa
BACKENDS = {
'multiprocessing': MultiprocessingBackend,
'threading': ThreadingBackend,
'sequential': SequentialBackend,
}
# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'multiprocessing'
DEFAULT_N_JOBS = 1
# Thread local value that can be overridden by the ``parallel_backend`` context
# manager
_backend = threading.local()
def get_active_backend():
"""Return the active default backend"""
active_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
if active_backend_and_jobs is not None:
return active_backend_and_jobs
# We are outside of the scope of any parallel_backend context manager,
# create the default backend instance now
active_backend = BACKENDS[DEFAULT_BACKEND]()
return active_backend, DEFAULT_N_JOBS
@contextmanager
def parallel_backend(backend, n_jobs=-1, **backend_params):
"""Change the default backend used by Parallel inside a with block.
If ``backend`` is a string it must match a previously registered
implementation using the ``register_parallel_backend`` function.
Alternatively backend can be passed directly as an instance.
By default all available workers will be used (``n_jobs=-1``) unless the
caller passes an explicit value for the ``n_jobs`` parameter.
This is an alternative to passing a ``backend='backend_name'`` argument to
the ``Parallel`` class constructor. It is particularly useful when calling
into library code that uses joblib internally but does not expose the
backend argument in its own API.
>>> from operator import neg
>>> with parallel_backend('threading'):
... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
...
[-1, -2, -3, -4, -5]
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
if isinstance(backend, _basestring):
backend = BACKENDS[backend](**backend_params)
old_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
try:
_backend.backend_and_jobs = (backend, n_jobs)
# return the backend instance to make it easier to write tests
yield backend, n_jobs
finally:
if old_backend_and_jobs is None:
if getattr(_backend, 'backend_and_jobs', None) is not None:
del _backend.backend_and_jobs
else:
_backend.backend_and_jobs = old_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
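# Sketch of the (func, args, kwargs) convention consumed by BatchedCalls, in
# the doctest style used elsewhere in this file:
#
#   >>> batch = BatchedCalls([(abs, (-1,), {}), (max, (2, 3), {})])
#   >>> len(batch)
#   2
#   >>> batch()
#   [1, 3]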
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
"""Return the number of CPUs."""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
    """ Returns False for indices that are increasingly far apart, the
    distance depending on the value of verbose.
    We use a lag increasing as the square of index.
    """
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
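# Behaviour sketch derived from the branches above (True means the message is
# filtered out, False means it is displayed):
#
#   >>> _verbosity_filter(5, 0)    # verbose=0: nothing is displayed
#   True
#   >>> _verbosity_filter(5, 11)   # verbose>10: everything is displayed
#   False
#   >>> _verbosity_filter(0, 3)    # the first index is always displayed
#   False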
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
    except AttributeError:
        # functools.wraps fails on some callable objects.
        pass
return delayed_function
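# The decorator only captures its arguments; a minimal sketch in the doctest
# style used in this file:
#
#   >>> from math import sqrt
#   >>> func, args, kwargs = delayed(sqrt)(4)
#   >>> func is sqrt, args, kwargs
#   (True, (4,), {})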
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
    It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
self.parallel._backend.batch_completed(self.batch_size,
this_batch_duration)
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
"""Register a new Parallel backend factory.
The new backend can then be selected by passing its name as the backend
argument to the Parallel class. Moreover, the default backend can be
overwritten globally by setting make_default=True.
    The factory can be any callable that takes no argument and returns an
instance of ``ParallelBackendBase``.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
BACKENDS[name] = factory
if make_default:
global DEFAULT_BACKEND
DEFAULT_BACKEND = name
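# Minimal usage sketch; 'my_threads' is an arbitrary illustrative name and
# ThreadingBackend is one of the factories imported at the top of this module:
#
#   >>> register_parallel_backend('my_threads', ThreadingBackend)
#   >>> with parallel_backend('my_threads', n_jobs=2):
#   ...     print(Parallel()(delayed(abs)(-i) for i in range(3)))
#   [0, 1, 2]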
def effective_n_jobs(n_jobs=-1):
"""Determine the number of jobs that can actually run in parallel
    n_jobs is the number of workers requested by the callers.
    Passing n_jobs=-1 means requesting all available workers, for instance
    matching the number of CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that can
actually perform work concurrently with the currently enabled default
backend. The primary use case is to make it possible for the caller to know
in how many chunks to slice the work.
In general working on larger data chunks is more efficient (less
scheduling overhead and better use of CPU cache prefetching heuristics)
as long as all the workers have enough work to do.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
backend, _ = get_active_backend()
return backend.effective_n_jobs(n_jobs=n_jobs)
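# For instance, the sequential case is reported as a single worker; results
# for other n_jobs values depend on the active backend and on the host, so
# only the unambiguous case is sketched here:
#
#   >>> effective_n_jobs(n_jobs=1)
#   1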
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str, ParallelBackendBase instance or None, \
default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
- finally, you can register backends by calling
register_parallel_backend. This will allow you to implement
a backend of your liking.
verbose: int, optional
        The verbosity level: if non-zero, progress messages are
        printed. Above 50, the output is sent to stdout.
        The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
timeout: float, optional
        Timeout limit for each task to complete. If any task takes longer,
        a TimeOutError will be raised. Only applied when n_jobs != 1.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
            very little overhead and using a larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
            Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAMdisk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Only active when backend="multiprocessing".
        max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
            Use None to disable memmapping of large arrays.
Only active when backend="multiprocessing".
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
        arguments. The main functionality it brings in addition to
        using the raw multiprocessing API is (see examples for details):
        * More readable code, in particular since it avoids
          constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
          - using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages:
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process:
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend=None, verbose=0, timeout=None,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
active_backend, default_n_jobs = get_active_backend()
if backend is None and n_jobs == 1:
# If we are under a parallel_backend context manager, look up
# the default number of jobs and use that instead:
n_jobs = default_n_jobs
self.n_jobs = n_jobs
self.verbose = verbose
self.timeout = timeout
self.pre_dispatch = pre_dispatch
if isinstance(max_nbytes, _basestring):
max_nbytes = memstr_to_bytes(max_nbytes)
self._backend_args = dict(
max_nbytes=max_nbytes,
mmap_mode=mmap_mode,
temp_folder=temp_folder,
verbose=max(0, self.verbose - 50),
)
if DEFAULT_MP_CONTEXT is not None:
self._backend_args['context'] = DEFAULT_MP_CONTEXT
if backend is None:
backend = active_backend
elif isinstance(backend, ParallelBackendBase):
# Use provided backend as is
pass
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._backend_args['context'] = backend
backend = MultiprocessingBackend()
else:
try:
backend_factory = BACKENDS[backend]
except KeyError:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, sorted(BACKENDS.keys())))
backend = backend_factory()
if (batch_size == 'auto' or isinstance(batch_size, Integral) and
batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self._backend = backend
self._output = None
self._jobs = list()
self._managed_backend = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_backend = True
self._initialize_backend()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_backend()
self._managed_backend = False
def _initialize_backend(self):
"""Build a process or thread pool and return the number of workers"""
try:
n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
**self._backend_args)
if self.timeout is not None and not self._backend.supports_timeout:
warnings.warn(
'The backend class {!r} does not support timeout. '
"You have set 'timeout={}' in Parallel but "
"the 'timeout' parameter will not be used.".format(
self._backend.__class__.__name__,
self.timeout))
except FallbackToBackend as e:
# Recursively initialize the backend in case of requested fallback.
self._backend = e.backend
n_jobs = self._initialize_backend()
return n_jobs
def _effective_n_jobs(self):
if self._backend:
return self._backend.effective_n_jobs(self.n_jobs)
return 1
def _terminate_backend(self):
if self._backend is not None:
self._backend.terminate()
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should only be called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._backend.apply_async(batch, callback=cb)
self._jobs.append(job)
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
        The iterator consumption and dispatching are protected by the same
        lock, so calling this function should be thread safe.
"""
if self.batch_size == 'auto':
batch_size = self._backend.compute_batch_size()
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if len(tasks) == 0:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
    def _print(self, msg, msg_args):
        """Display the message on stdout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
    def print_progress(self):
        """Display the progress of the parallel execution only a fraction
        of the time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # The original job iterator becomes None once it has been fully
        # consumed: at this point we know the total number of jobs and we are
        # able to display an estimation of the remaining time based on already
        # completed jobs. Otherwise, we simply display the number of completed
        # tasks.
if self._original_iterator is not None:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time), ))
else:
index = self.n_completed_tasks
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1 -
self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / index) * \
(self.n_dispatched_tasks - index * 1.0)
# only display status if remaining time is greater or equal to 0
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
            # We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default, hence
            # the use of the lock.
with self._lock:
job = self._jobs.pop(0)
try:
if getattr(self._backend, 'supports_timeout', False):
self._output.extend(job.get(timeout=self.timeout))
else:
self._output.extend(job.get())
except BaseException as exception:
# Note: we catch any BaseException instead of just Exception
# instances to also include KeyboardInterrupt.
# Stop dispatching any new job in the async callback thread
self._aborting = True
# If the backend allows it, cancel or kill remaining running
# tasks without waiting for the results as we will raise
# the exception we got back to the caller instead of returning
# any result.
backend = self._backend
if (backend is not None and
hasattr(backend, 'abort_everything')):
# If the backend is managed externally we need to make sure
# to leave it in a working state to allow for future jobs
# scheduling.
ensure_ready = self._managed_backend
backend.abort_everything(ensure_ready=ensure_ready)
if not isinstance(exception, TransportableException):
raise
else:
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_backend:
n_jobs = self._initialize_backend()
else:
n_jobs = self._effective_n_jobs()
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator.
while self.dispatch_one_batch(iterator):
self._iterating = True
else:
self._iterating = False
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
                # No need to wait for async callbacks to trigger the
                # consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_backend:
self._terminate_backend()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| mit | -288,866,009,799,402,750 | 40.257463 | 103 | 0.589008 | false |
rafaduran/reqfiles | tests/conftest.py | 1 | 1083 | '''pytest conftest module'''
import pytest
from . import common
@pytest.fixture
def reqfile():
from reqfiles import core
return core.Reqfiles(eager=False)
@pytest.fixture
def parser():
from reqfiles import parsers
return parsers.Parser()
@pytest.fixture
def classifier():
from reqfiles import classifiers
return classifiers.BaseClassifier()
@pytest.fixture
def reqs():
return [req for req, _ in common.REQ_FIXTURES]
@pytest.fixture
def reqstrings():
return [rstring for _, (rstring, _) in common.REQ_FIXTURES]
@pytest.fixture
def links():
return [link for req, (reqstring, link) in common.REQ_FIXTURES if link]
@pytest.fixture
def sreqfile(reqfile, reqstrings, links):
reqfile._data['install_requires'] = reqstrings[:2]
reqfile._data['tests_require'] = reqstrings[3:4]
reqfile._data['extras_require']['ci'] = reqstrings[4:]
reqfile._data['dependency_links'] = links
return reqfile
@pytest.fixture
def rfc_classifier():
from reqfiles import classifiers
return classifiers.RequirementsFilesClassifier()
| bsd-3-clause | -7,082,745,547,132,856,000 | 19.826923 | 75 | 0.712835 | false |
luci/luci-py | client/tests/swarming_test.py | 3 | 87686 | #!/usr/bin/env vpython
# -*- coding: utf-8 -*-
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from __future__ import print_function
import datetime
import json
import logging
import os
import StringIO
import sys
import tempfile
import threading
import time
import traceback
import uuid
import six
# Mutates sys.path.
import test_env
# third_party/
from depot_tools import auto_stub
import isolateserver_fake
import net_utils
import swarmingserver_fake
import auth
import isolateserver
import local_caching
import swarming
from utils import file_path
from utils import logging_utils
from utils import subprocess42
from utils import tools
FILE_HASH = u'1' * 40
TEST_NAME = u'unit_tests'
OUTPUT = 'Ran stuff\n'
SHARD_OUTPUT_1 = 'Shard 1 of 3.'
SHARD_OUTPUT_2 = 'Shard 2 of 3.'
SHARD_OUTPUT_3 = 'Shard 3 of 3.'
REQUEST_UUID = '7905e667-d415-48f1-9df7-f914541d6331'
def gen_yielded_data(index, **kwargs):
"""Returns an entry as it would be yielded by yield_results()."""
return index, gen_result_response(**kwargs)
def get_results(keys, output_collector=None):
"""Simplifies the call to yield_results().
The timeout is hard-coded to 10 seconds.
"""
return list(
swarming.yield_results('https://host:9001', keys, 10., None, True,
output_collector, False, True))
def collect(url, task_ids, task_stdout=('console', 'json')):
"""Simplifies the call to swarming.collect()."""
return swarming.collect(
swarming=url,
task_ids=task_ids,
timeout=10,
decorate=True,
print_status_updates=True,
task_summary_json=None,
task_output_dir=None,
task_output_stdout=task_stdout,
include_perf=False,
filepath_filter='.*')
def main(args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
dispatcher = swarming.subcommand.CommandDispatcher('swarming')
return dispatcher.execute(swarming.OptionParserSwarming(), args)
def gen_properties(**kwargs):
out = {
'caches': [],
'cipd_input': None,
'command': None,
'containment': {
'lower_priority': False,
'containment_type': 'NONE',
},
'relative_cwd': None,
'dimensions': [
{
'key': 'os',
'value': 'Mac'
},
{
'key': 'pool',
'value': 'default'
},
],
'env': [],
'env_prefixes': [],
'execution_timeout_secs': 60,
'extra_args': ['--some-arg', '123'],
'grace_period_secs': 30,
'idempotent': False,
'inputs_ref': {
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
'io_timeout_secs': 60,
'outputs': [],
'secret_bytes': None,
}
out.update(kwargs)
return out
def gen_request_data(properties=None, **kwargs):
out = {
'name': 'unit_tests',
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 101,
'request_uuid': REQUEST_UUID,
'task_slices': [{
'expiration_secs': 3600,
'properties': gen_properties(**(properties or {})),
'wait_for_capacity': False,
},],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
'realm': None,
'resultdb': {
'enable': False
},
}
out.update(kwargs)
return out
def gen_request_response(request, **kwargs):
# As seen in services/swarming/handlers_api.py.
out = {
'request': request.copy(),
'task_id': '12300',
}
out.update(kwargs)
return out
def gen_result_response(**kwargs):
out = {
u'bot_id': u'swarm6',
u'completed_ts': u'2014-09-24T13:49:16.012345',
u'created_ts': u'2014-09-24T13:49:03.012345',
u'duration': 0.9636809825897217,
u'exit_code': 0,
u'failure': False,
u'internal_failure': False,
u'modified_ts': u'2014-09-24T13:49:17.012345',
u'name': u'heartbeat-canary-2014-09-24_13:49:01-os=Ubuntu',
u'server_versions': [u'1'],
u'started_ts': u'2014-09-24T13:49:09.012345',
u'state': 'COMPLETED',
u'tags': [u'cpu:x86', u'priority:200', u'user:joe@localhost'],
u'task_id': u'10100',
u'try_number': 1,
u'user': u'joe@localhost',
}
out.update(kwargs)
return out
class NonBlockingEvent(threading._Event):
"""Just like threading.Event, but a class and ignores timeout in 'wait'.
Intended to be used as a mock for threading.Event in tests.
"""
def wait(self, timeout=None):
return super(NonBlockingEvent, self).wait(0)
class Common(object):
# pylint: disable=no-member
def setUp(self):
self._tempdir = None
self.mock(auth, 'ensure_logged_in', lambda _: None)
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
self.mock(logging_utils, 'prepare_logging', lambda *args: None)
self.mock(logging_utils, 'set_console_level', lambda *args: None)
self.mock(uuid, 'uuid4', lambda: REQUEST_UUID)
def tearDown(self):
if self._tempdir:
file_path.rmtree(self._tempdir)
if not self.has_failed():
self._check_output('', '')
@property
def tempdir(self):
"""Creates the directory on first reference."""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(prefix=u'swarming_test')
return self._tempdir
maxDiff = None
def _check_output(self, out, err):
self.assertMultiLineEqual(out, sys.stdout.getvalue())
self.assertMultiLineEqual(err, sys.stderr.getvalue())
# Flush their content by mocking them again.
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
def main_safe(self, args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
# pylint: disable=bare-except
try:
return main(args)
except:
data = '%s\nSTDOUT:\n%s\nSTDERR:\n%s' % (
traceback.format_exc(), sys.stdout.getvalue(), sys.stderr.getvalue())
self.fail(data)
class NetTestCase(net_utils.TestCase, Common):
# These test fail when running in parallel
# Need to run in test_seq.py as an executable
no_run = 1
"""Base class that defines the url_open mock."""
def setUp(self):
net_utils.TestCase.setUp(self)
Common.setUp(self)
self.mock(time, 'sleep', lambda _: None)
self.mock(subprocess42, 'call', lambda *_: self.fail())
self.mock(threading, 'Event', NonBlockingEvent)
class TestIsolated(auto_stub.TestCase, Common):
  # These tests fail when running in parallel
# Need to run in test_seq.py as an executable
no_run = 1
"""Test functions with isolated_ prefix."""
def setUp(self):
auto_stub.TestCase.setUp(self)
Common.setUp(self)
self._isolate = isolateserver_fake.FakeIsolateServer()
self._swarming = swarmingserver_fake.FakeSwarmingServer()
def tearDown(self):
try:
self._isolate.close()
self._swarming.close()
finally:
Common.tearDown(self)
auto_stub.TestCase.tearDown(self)
def test_reproduce_isolated(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
# 'out' is the default value for --output-dir.
outdir = os.path.join(self.tempdir, 'out')
self.assertTrue(os.path.isdir(outdir))
if sys.platform == 'darwin':
# On macOS, python executable's path is a symlink and it's hard to
# assess what will be passed to the command line. :/
self.assertEqual(
[u'main.py', u'foo',
os.path.realpath(outdir), '--bar'], cmd[1:])
else:
self.assertEqual([
sys.executable, u'main.py', u'foo',
os.path.realpath(outdir), '--bar'
], cmd)
expected = os.environ.copy()
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(os.path.abspath('work')), cwd)
return 0
self.mock(subprocess42, 'call', call)
main_hash = self._isolate.add_content_compressed('default-gzip',
'not executed')
isolated = {
'files': {
'main.py': {
'h': main_hash,
's': 12,
'm': 0o700,
},
},
'command': ['python', 'main.py'],
}
isolated_hash = self._isolate.add_content_compressed(
'default-gzip', json.dumps(isolated))
self._swarming._server.tasks[123] = {
'properties': {
'inputs_ref': {
'isolatedserver': self._isolate.url,
'namespace': 'default-gzip',
'isolated': isolated_hash,
},
'extra_args': ['foo', '${ISOLATED_OUTDIR}'],
'secret_bytes': None,
},
}
ret = self.main_safe([
'reproduce',
'--swarming',
self._swarming.url,
'123',
'--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
class TestSwarmingTrigger(NetTestCase):
def test_trigger_task_shards_2_shards(self):
def make_new_request(shard_index):
return swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id=None,
pool_task_template='AUTO',
priority=101,
task_slices=[
swarming.TaskSlice(
expiration_secs=60 * 60,
properties=swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
containment=swarming.Containment(
lower_priority=False,
containment_type='NONE',
),
relative_cwd=None,
dimensions=[('os', 'Mac'), ('pool', 'default')],
env={
'GTEST_SHARD_INDEX': str(shard_index),
'GTEST_TOTAL_SHARDS': '2',
},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
wait_for_capacity=False),
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost',
realm=None,
resultdb={'enable': False})
task_request_1 = make_new_request(0)
request_1 = swarming.task_request_to_raw_request(task_request_1)
request_1['name'] = u'unit_tests:0:2'
result_1 = gen_request_response(request_1)
task_request_2 = make_new_request(1)
request_2 = swarming.task_request_to_raw_request(task_request_2)
request_2['name'] = u'unit_tests:1:2'
request_2['tags'] += [u'shard_index:1', u'total_shards:2']
result_2 = gen_request_response(request_2, task_id='12400')
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request_1
},
result_1,
),
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request_2
},
result_2,
),
])
tasks = {}
tasks.update(
swarming.trigger_task_shards(
swarming='https://localhost:1', task_request=task_request_1))
tasks.update(
swarming.trigger_task_shards(
swarming='https://localhost:1', task_request=task_request_2))
expected = {
u'unit_tests:0:2': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
},
u'unit_tests:1:2': {
'shard_index': 1,
'task_id': '12400',
'view_url': 'https://localhost:1/user/task/12400',
},
}
self.assertEqual(expected, tasks)
def test_trigger_task_shard_custom_index(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id=None,
pool_task_template='AUTO',
priority=101,
task_slices=[
swarming.TaskSlice(
expiration_secs=60 * 60,
properties=swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
containment=swarming.Containment(
lower_priority=False,
containment_type='NONE',
),
relative_cwd=None,
dimensions=[('os', 'Mac'), ('pool', 'default')],
env={
'GTEST_SHARD_INDEX': '2',
'GTEST_TOTAL_SHARDS': '4'
},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
wait_for_capacity=False),
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost',
realm=None,
resultdb={'enable': False})
request_1 = swarming.task_request_to_raw_request(task_request)
request_1['name'] = u'unit_tests:2:4'
request_1['tags'].extend(['shard_index:2', 'total_shards:4'])
request_1['task_slices'][0]['properties']['env'] = [
{
'key': 'GTEST_SHARD_INDEX',
'value': '2'
},
{
'key': 'GTEST_TOTAL_SHARDS',
'value': '4'
},
]
result_1 = gen_request_response(request_1)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request_1
},
result_1,
),
])
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1', task_request=task_request)
expected = {
u'unit_tests:2:4': {
'shard_index': 2,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
},
}
self.assertEqual(expected, tasks)
def test_trigger_task_shards_priority_override(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
pool_task_template='AUTO',
priority=101,
task_slices=[
swarming.TaskSlice(
expiration_secs=60 * 60,
properties=swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
containment=swarming.Containment(
lower_priority=False,
containment_type='NONE',
),
relative_cwd=None,
dimensions=[('os', 'Mac'), ('pool', 'default')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
wait_for_capacity=False),
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost',
realm=None,
resultdb={'enable': False})
request = swarming.task_request_to_raw_request(task_request)
self.assertEqual('123', request['parent_task_id'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1', task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
def test_trigger_cipd_package(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
pool_task_template='AUTO',
priority=101,
task_slices=[
swarming.TaskSlice(
expiration_secs=60 * 60,
properties=swarming.TaskProperties(
caches=[],
cipd_input=swarming.CipdInput(
client_package=None,
packages=[
swarming.CipdPackage(
package_name='mypackage',
path='path/to/package',
version='abc123')
],
server=None),
command=['a', 'b'],
containment=swarming.Containment(
lower_priority=False,
containment_type='NONE',
),
relative_cwd=None,
dimensions=[('os', 'Mac'), ('pool', 'default')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
wait_for_capacity=False),
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost',
realm=None,
resultdb={'enable': False})
request = swarming.task_request_to_raw_request(task_request)
expected = {
'client_package': None,
'packages': [{
'package_name': 'mypackage',
'path': 'path/to/package',
'version': 'abc123',
}],
'server': None
}
self.assertEqual(expected,
request['task_slices'][0]['properties']['cipd_input'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1', task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
def test_trigger_resultdb_and_realm(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
pool_task_template='AUTO',
priority=101,
task_slices=[
swarming.TaskSlice(
expiration_secs=60 * 60,
properties=swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
containment=swarming.Containment(
lower_priority=False,
containment_type='NONE',
),
relative_cwd=None,
dimensions=[('os', 'Mac'), ('pool', 'default')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
wait_for_capacity=False),
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost',
realm="chromium:try",
resultdb={'enable': True})
request = swarming.task_request_to_raw_request(task_request)
self.assertTrue(request['resultdb']['enable'])
self.assertEqual('chromium:try', request['realm'])
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
gen_request_response(request),
),
])
swarming.trigger_task_shards(
swarming='https://localhost:1', task_request=task_request)
class TestSwarmingCollection(NetTestCase):
def test_success(self):
self.expected_requests([
(
'https://host:9001/_ah/api/swarming/v1/task/10100/result',
{
'retry_50x': False
},
gen_result_response(),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10100/stdout',
{},
{
'output': OUTPUT
},
),
])
expected = [gen_yielded_data(0, output=OUTPUT)]
self.assertEqual(expected, get_results(['10100']))
def test_failure(self):
self.expected_requests([
(
'https://host:9001/_ah/api/swarming/v1/task/10100/result',
{
'retry_50x': False
},
gen_result_response(exit_code=1),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10100/stdout',
{},
{
'output': OUTPUT
},
),
])
expected = [gen_yielded_data(0, output=OUTPUT, exit_code=1)]
self.assertEqual(expected, get_results(['10100']))
def test_no_ids(self):
actual = get_results([])
self.assertEqual([], actual)
def test_url_errors(self):
self.mock(logging, 'error', lambda *_, **__: None)
# NOTE: get_results() hardcodes timeout=10.
now = {}
lock = threading.Lock()
def get_now():
t = threading.current_thread()
with lock:
return now.setdefault(t, range(10)).pop(0)
self.mock(swarming.net, 'sleep_before_retry', lambda _x, _y: None)
self.mock(swarming, 'now', get_now)
# The actual number of requests here depends on 'now' progressing to 10
# seconds. It's called once per loop. Loop makes 9 iterations.
self.expected_requests(9 * [(
'https://host:9001/_ah/api/swarming/v1/task/10100/result',
{
'retry_50x': False
},
None,
)])
actual = get_results(['10100'])
self.assertEqual([], actual)
self.assertTrue(all(not v for v in now.values()), now)
def test_many_shards(self):
self.expected_requests([
(
'https://host:9001/_ah/api/swarming/v1/task/10100/result',
{
'retry_50x': False
},
gen_result_response(),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10100/stdout',
{},
{
'output': SHARD_OUTPUT_1
},
),
(
'https://host:9001/_ah/api/swarming/v1/task/10200/result',
{
'retry_50x': False
},
gen_result_response(),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10200/stdout',
{},
{
'output': SHARD_OUTPUT_2
},
),
(
'https://host:9001/_ah/api/swarming/v1/task/10300/result',
{
'retry_50x': False
},
gen_result_response(),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10300/stdout',
{},
{
'output': SHARD_OUTPUT_3
},
),
])
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3),
]
actual = get_results(['10100', '10200', '10300'])
self.assertEqual(expected, sorted(actual))
def test_output_collector_called(self):
# Three shards, one failed. All results are passed to output collector.
self.expected_requests([
(
'https://host:9001/_ah/api/swarming/v1/task/10100/result',
{
'retry_50x': False
},
gen_result_response(),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10100/stdout',
{},
{
'output': SHARD_OUTPUT_1
},
),
(
'https://host:9001/_ah/api/swarming/v1/task/10200/result',
{
'retry_50x': False
},
gen_result_response(),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10200/stdout',
{},
{
'output': SHARD_OUTPUT_2
},
),
(
'https://host:9001/_ah/api/swarming/v1/task/10300/result',
{
'retry_50x': False
},
gen_result_response(exit_code=1),
),
(
'https://host:9001/_ah/api/swarming/v1/task/10300/stdout',
{},
{
'output': SHARD_OUTPUT_3
},
),
])
class FakeOutputCollector(object):
def __init__(self):
self.results = []
self._lock = threading.Lock()
def process_shard_result(self, index, result):
with self._lock:
self.results.append((index, result))
output_collector = FakeOutputCollector()
get_results(['10100', '10200', '10300'], output_collector)
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3, exit_code=1),
]
self.assertEqual(sorted(expected), sorted(output_collector.results))
def test_collect_nothing(self):
self.mock(swarming, 'yield_results', lambda *_: [])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
self._check_output('', 'Results from some shards are missing: 0, 1\n')
def test_collect_success(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100']))
expected = u'\n'.join(
('+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+', 'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s', ''))
self._check_output(expected, '')
def test_collect_success_nostdout(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100'], []))
expected = u'\n'.join(
('+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s', ''))
self._check_output(expected, '')
def test_collect_fail(self):
data = gen_result_response(output='Foo', exit_code=-9)
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(-9, collect('https://localhost:1', ['10100']))
expected = u'\n'.join(
('+-------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+-------------------------------------------------------+', 'Foo',
'+-------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: -9 |',
'+-------------------------------------------------------+',
'Total duration: 1.0s', ''))
self._check_output(expected, '')
def test_collect_one_missing(self):
data = gen_result_response(output='Foo')
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
expected = u'\n'.join(
('+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+', 'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+', '',
'Total duration: 1.0s', ''))
self._check_output(expected, 'Results from some shards are missing: 1\n')
def test_collect_non_ascii(self):
data = gen_result_response(output='Non-ascii character: µ')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100']))
expected = u'\n'.join(
('+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Non-ascii character: µ',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s', ''))
self._check_output(expected, '')
def test_collect_multi(self):
actual_calls = []
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks,
filepath_filter):
self.assertIs(storage.__class__, isolateserver.Storage)
self.assertIs(cache.__class__, local_caching.MemoryContentAddressedCache)
# Ensure storage is pointing to required location.
self.assertEqual('https://localhost:2', storage.server_ref.url)
self.assertEqual('default', storage.server_ref.namespace)
self.assertEqual(False, use_symlinks)
self.assertEqual('.*', filepath_filter)
actual_calls.append((isolated_hash, outdir))
self.mock(isolateserver, 'fetch_isolated', fetch_isolated)
collector = swarming.TaskOutputCollector(self.tempdir, ['json', 'console'],
2, '.*')
for index in range(2):
collector.process_shard_result(
index,
gen_result_response(
outputs_ref={
'isolated': str(index) * 40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
}))
summary = collector.finalize()
expected_calls = [
('0' * 40, os.path.join(self.tempdir, '0')),
('1' * 40, os.path.join(self.tempdir, '1')),
]
self.assertEqual(expected_calls, actual_calls)
# Ensure collected summary is correct.
outputs_refs = [
{
'isolated':
'0' * 40,
'isolatedserver':
'https://localhost:2',
'namespace':
'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '0' * 40,
},
{
'isolated':
'1' * 40,
'isolatedserver':
'https://localhost:2',
'namespace':
'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '1' * 40,
},
]
expected = {
'shards': [gen_result_response(outputs_ref=o) for o in outputs_refs],
}
self.assertEqual(expected, summary)
# Ensure summary dumped to a file is correct as well.
with open(os.path.join(self.tempdir, 'summary.json'), 'r') as f:
summary_dump = json.load(f)
self.assertEqual(expected, summary_dump)
def test_ensures_same_server(self):
self.mock(logging, 'error', lambda *_: None)
# Two shard results, attempt to use different servers.
actual_calls = []
self.mock(isolateserver,
'fetch_isolated', lambda *args: actual_calls.append(args))
data = [
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server1',
'namespace': 'namespace',
'isolated': 'hash1',
}),
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server2',
'namespace': 'namespace',
'isolated': 'hash1',
}),
]
# Feed them to collector.
collector = swarming.TaskOutputCollector(self.tempdir, ['json', 'console'],
2, None)
for index, result in enumerate(data):
collector.process_shard_result(index, result)
collector.finalize()
# Only first fetch is made, second one is ignored.
self.assertEqual(1, len(actual_calls))
isolated_hash, storage, _, outdir, _, _ = actual_calls[0]
self.assertEqual(('hash1', os.path.join(self.tempdir, '0')),
(isolated_hash, outdir))
self.assertEqual('https://server1', storage.server_ref.url)
class TestMain(NetTestCase):
# Tests calling main().
def test_bot_delete(self):
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/bot/foo/delete',
{
'method': 'POST',
'data': {}
},
{},
),
])
ret = self.main_safe(
['bot_delete', '--swarming', 'https://localhost:1', 'foo', '--force'])
self._check_output('', '')
self.assertEqual(0, ret)
def test_trigger_raw_cmd(self):
# Minimalist use.
request = {
'name': u'None/pool=default',
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 200,
'request_uuid': REQUEST_UUID,
'task_slices': [{
'expiration_secs':
21600,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{
'key': 'pool',
'value': 'default'
}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},],
'tags': [],
'user': None,
'realm': None,
'resultdb': {
'enable': False
},
}
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--dimension',
'pool',
'default',
'--raw-cmd',
'--relative-cwd',
'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/pool=default\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
def test_trigger_raw_cmd_with_optional(self):
request = {
'name': u'None/caches=c1_foo=bar_foo1=bar1_pool=default',
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 200,
'request_uuid': REQUEST_UUID,
'task_slices': [
{
'expiration_secs':
60,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'caches',
'value': 'c1'
},
{
'key': 'caches',
'value': 'c2'
},
{
'key': 'foo',
'value': 'baz'
},
{
'key': 'foo1',
'value': 'baz1'
},
{
'key': 'opt',
'value': 'tional'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
{
'expiration_secs':
120,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'caches',
'value': 'c1'
},
{
'key': 'caches',
'value': 'c2'
},
{
'key': 'foo',
'value': 'bar'
},
{
'key': 'foo1',
'value': 'baz1'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
{
'expiration_secs':
21420,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'caches',
'value': 'c1'
},
{
'key': 'foo',
'value': 'bar'
},
{
'key': 'foo1',
'value': 'bar1'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
],
'tags': [],
'user': None,
'realm': None,
'resultdb': {
'enable': False
},
}
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--dimension',
'pool',
'default',
'--dimension',
'foo',
'bar',
'--dimension',
'foo1',
'bar1',
'--dimension',
'caches',
'c1',
'--optional-dimension',
'foo',
'baz',
60,
'--optional-dimension',
'opt',
'tional',
60,
'--optional-dimension',
'foo1',
'baz1',
180,
'--optional-dimension',
'caches',
'c2',
180,
'--raw-cmd',
'--relative-cwd',
'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/caches=c1_foo=bar_foo1=bar1_pool=default\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
def test_trigger_raw_cmd_with_optional_unsorted(self):
request = {
'name': u'None/foo1=bar1_os=Mac-10.12.6_pool=default',
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 200,
'request_uuid': REQUEST_UUID,
'task_slices': [
{
'expiration_secs':
60,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'foo1',
'value': 'baz1'
},
{
'key': 'os',
'value': 'Mac-10.13'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
{
'expiration_secs':
60,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'foo1',
'value': 'baz1'
},
{
'key': 'os',
'value': 'Mac-10.12.6'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
{
'expiration_secs':
21480,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'foo1',
'value': 'bar1'
},
{
'key': 'os',
'value': 'Mac-10.12.6'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
],
'tags': [],
'user': None,
'realm': None,
'resultdb': {
'enable': False
},
}
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--dimension',
'os',
'Mac-10.12.6',
'--dimension',
'pool',
'default',
'--dimension',
'foo1',
'bar1',
'--optional-dimension',
'foo1',
'baz1',
120,
'--optional-dimension',
'os',
'Mac-10.13',
60,
'--raw-cmd',
'--relative-cwd',
'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo1=bar1_os=Mac-10.12.6_pool=default\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
def test_trigger_raw_cmd_with_optional_sameexp(self):
request = {
'name': u'None/foo=bar_foo1=bar1_pool=default',
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 200,
'request_uuid': REQUEST_UUID,
'task_slices': [
{
'expiration_secs':
60,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'foo',
'value': 'baz'
},
{
'key': 'foo1',
'value': 'bar1'
},
{
'key': 'foo2',
'value': 'baz2'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
{
'expiration_secs':
21540, # 21600 - 60
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[
{
'key': 'foo',
'value': 'bar'
},
{
'key': 'foo1',
'value': 'bar1'
},
{
'key': 'pool',
'value': 'default'
},
],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},
],
'tags': [],
'user': None,
'realm': None,
'resultdb': {
'enable': False
},
}
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--dimension',
'pool',
'default',
'--dimension',
'foo',
'bar',
'--dimension',
'foo1',
'bar1',
'--optional-dimension',
'foo',
'baz',
60,
'--optional-dimension',
'foo2',
'baz2',
60,
'--raw-cmd',
'--relative-cwd',
'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar_foo1=bar1_pool=default\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
def test_trigger_raw_cmd_isolated(self):
# Minimalist use.
request = {
'name': u'None/pool=default/' + FILE_HASH,
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 200,
'request_uuid': REQUEST_UUID,
'task_slices': [{
'expiration_secs':
21600,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{
'key': 'pool',
'value': 'default'
}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
},
io_timeout_secs=1200),
'wait_for_capacity':
False,
},],
'tags': [],
'user': None,
'realm': None,
'resultdb': {
'enable': False
},
}
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--dimension',
'pool',
'default',
'--raw-cmd',
'--isolate-server',
'https://localhost:2',
'--isolated',
FILE_HASH,
'--',
'python',
'-c',
'print(\'hi\')',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/pool=default/' + FILE_HASH + u'\n'
u'To collect results, use:\n'
u' tools/swarming_client/swarming.py collect '
u'-S https://localhost:1 12300\n'
u'Or visit:\n'
u' https://localhost:1/user/task/12300\n', u'')
def test_trigger_raw_cmd_with_service_account(self):
# Minimalist use.
request = {
'name': u'None/pool=default',
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 200,
'request_uuid': REQUEST_UUID,
'task_slices': [{
'expiration_secs':
21600,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{
'key': 'pool',
'value': 'default'
}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200),
'wait_for_capacity':
False,
},],
'service_account': 'bot',
'tags': [],
'user': None,
'realm': None,
'resultdb': {
'enable': False
},
}
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--dimension',
'pool',
'default',
'--service-account',
'bot',
'--raw-cmd',
'--',
'python',
'-c',
'print(\'hi\')',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/pool=default\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
def test_trigger_isolated_hash(self):
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(task_slices=[
{
'expiration_secs':
3600,
'properties':
gen_properties(
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
'wait_for_capacity':
False,
},
])
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--isolate-server',
'https://localhost:2',
'--priority',
'101',
'--dimension',
'os',
'Mac',
'--dimension',
'pool',
'default',
'--expiration',
'3600',
'--user',
'joe@localhost',
'--tags',
'tag:a',
'--tags',
'tag:b',
'--hard-timeout',
'60',
'--io-timeout',
'60',
'--task-name',
'unit_tests',
'--isolated',
FILE_HASH,
'--',
'--some-arg',
'123',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
def test_trigger_isolated_and_json(self):
write_json_calls = []
self.mock(tools, 'write_json', lambda *args: write_json_calls.append(args))
subprocess_calls = []
self.mock(subprocess42, 'call', lambda *c: subprocess_calls.append(c))
self.mock(swarming, 'now', lambda: 123456)
isolated = os.path.join(self.tempdir, 'zaz.isolated')
content = '{}'
with open(isolated, 'wb') as f:
f.write(content)
isolated_hash = isolateserver_fake.hash_content(content)
request = gen_request_data(task_slices=[
{
'expiration_secs':
3600,
'properties':
gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
'wait_for_capacity':
False,
},
])
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--isolate-server',
'https://localhost:2',
'--priority',
'101',
'--dimension',
'os',
'Mac',
'--dimension',
'pool',
'default',
'--expiration',
'3600',
'--user',
'joe@localhost',
'--tags',
'tag:a',
'--tags',
'tag:b',
'--hard-timeout',
'60',
'--io-timeout',
'60',
'--idempotent',
'--task-name',
'unit_tests',
'--dump-json',
'foo.json',
'--isolated',
isolated_hash,
'--',
'--some-arg',
'123',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self.assertEqual([], subprocess_calls)
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 --json foo.json\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
expected = [
(
u'foo.json',
{
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'pool_task_template': 'AUTO',
'priority': 101,
'request_uuid': REQUEST_UUID,
'task_slices': [{
'expiration_secs':
3600,
'properties':
gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
'wait_for_capacity':
False,
},],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
'realm': None,
'resultdb': {
'enable': False
},
},
},
True,
),
]
self.assertEqual(expected, write_json_calls)
def test_trigger_cipd(self):
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(task_slices=[
{
'expiration_secs':
3600,
'properties':
gen_properties(
cipd_input={
'client_package': None,
'packages': [{
'package_name': 'super/awesome/pkg',
'path': 'path/to/pkg',
'version': 'version:42',
},],
'server': None,
},
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
'wait_for_capacity':
False,
},
])
result = gen_request_response(request)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming',
'https://localhost:1',
'--isolate-server',
'https://localhost:2',
'--priority',
'101',
'--dimension',
'os',
'Mac',
'--dimension',
'pool',
'default',
'--expiration',
'3600',
'--user',
'joe@localhost',
'--tags',
'tag:a',
'--tags',
'tag:b',
'--hard-timeout',
'60',
'--io-timeout',
'60',
'--task-name',
'unit_tests',
'--isolated',
FILE_HASH,
'--cipd-package',
'path/to/pkg:super/awesome/pkg:version:42',
'--',
'--some-arg',
'123',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' tools/swarming_client/swarming.py collect '
'-S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n', '')
def test_trigger_no_request(self):
with self.assertRaises(SystemExit):
main([
'trigger',
'--swarming',
'https://host',
'--isolate-server',
'https://host',
'-T',
'foo',
'-d',
'pool',
'default',
])
self._check_output(
'', 'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]\n'
'\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_env_vars(self):
with self.assertRaises(SystemExit):
main(['trigger'])
self._check_output(
'', 'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_swarming_env_var(self):
with self.assertRaises(SystemExit):
with test_env.EnvVars({'ISOLATE_SERVER': 'https://host'}):
        main(['trigger', '-T', 'foo', 'foo.isolated'])
self._check_output(
'', 'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_isolate_server(self):
with self.assertRaises(SystemExit):
with test_env.EnvVars({'SWARMING_SERVER': 'https://host'}):
main(['trigger', 'foo.isolated', '-d', 'pool', 'default'])
self._check_output(
'', 'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_dimension(self):
with self.assertRaises(SystemExit):
main([
'trigger',
'--swarming',
'https://host',
'--raw-cmd',
'--',
'foo',
])
self._check_output(
'', 'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Please at least specify one --dimension\n')
def test_collect_default_json(self):
j = os.path.join(self.tempdir, 'foo.json')
data = {
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [{
'expiration_secs':
3600,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
relative_cwd='deeep'),
'wait_for_capacity':
False,
},],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
}
with open(j, 'wb') as f:
json.dump(data, f)
def stub_collect(swarming_server, task_ids, timeout, decorate,
print_status_updates, task_summary_json, task_output_dir,
task_output_stdout, include_perf, filepath_filter):
self.assertEqual('https://host', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
self.assertEqual(3670., timeout)
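      # 3670 = 3600 (slice expiration above) + 60 + 10, assuming gen_properties'
      # default hard timeout is 60 seconds.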
self.assertEqual(True, decorate)
self.assertEqual(True, print_status_updates)
self.assertEqual('/a', task_summary_json)
self.assertEqual('/b', task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
self.assertEqual('output.json', filepath_filter)
print('Fake output')
self.mock(swarming, 'collect', stub_collect)
self.main_safe([
'collect', '--swarming', 'https://host', '--json', j, '--decorate',
'--print-status-updates', '--task-summary-json', '/a',
'--task-output-dir', '/b', '--task-output-stdout', 'all',
'--filepath-filter', 'output.json'
])
self._check_output('Fake output\n', '')
def test_post(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
self.expected_requests([
(
'http://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': '{"a":"b"}',
'method': 'POST'
},
'{"yo":"dawg"}',
{},
),
])
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(0, ret)
self.assertEqual('{"yo":"dawg"}', out.getvalue())
self.assertEqual('', err.getvalue())
def test_post_fail(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(1, ret)
self.assertEqual('', out.getvalue())
self.assertEqual('No response!\n', err.getvalue())
def test_query_base(self):
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/bot/botid/tasks?limit=200',
{},
{
'yo': 'dawg'
},
),
])
ret = self.main_safe([
'query',
'--swarming',
'https://localhost:1',
'bot/botid/tasks',
])
self._check_output('{\n "yo": "dawg"\n}\n', '')
self.assertEqual(0, ret)
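  # The next test documents cursor pagination in 'query': the client keeps following
  # the returned cursor, requesting however much of --limit is left each round
  # (limit=2, then cursor=%25&limit=1), merges the 'items' of every page, and keeps
  # the first page's remaining keys while dropping the cursor bookkeeping.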
def test_query_cursor(self):
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/bot/botid/tasks?'
'foo=bar&limit=2',
{},
{
'cursor': '%',
'extra': False,
'items': ['A'],
},
),
(
'https://localhost:1/_ah/api/swarming/v1/bot/botid/tasks?'
'foo=bar&cursor=%25&limit=1',
{},
{
'cursor': None,
'items': ['B'],
'ignored': True,
},
),
])
ret = self.main_safe([
'query',
'--swarming',
'https://localhost:1',
'bot/botid/tasks?foo=bar',
'--limit',
'2',
])
expected = ('{\n'
' "extra": false, \n'
' "items": [\n'
' "A", \n'
' "B"\n'
' ]\n'
'}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
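  # The next test documents 'reproduce': the task request is fetched, then the command
  # is run from an absolute 'work' directory with the request's env applied, the
  # env_prefixes PATH entries prepended as work-relative absolute paths, and
  # SWARMING_TASK_ID / SWARMING_BOT_ID both set to 'reproduce'.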
def test_reproduce(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
w = os.path.abspath('work')
self.assertEqual([os.path.join(w, 'foo'), '--bar'], cmd)
expected = os.environ.copy()
expected['aa'] = 'bb'
expected['PATH'] = os.pathsep.join(
(os.path.join(w, 'foo',
'bar'), os.path.join(w, 'second'), expected['PATH']))
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(w), cwd)
return 0
self.mock(subprocess42, 'call', call)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/task/123/request',
{},
{
'properties': {
'command': ['foo'],
'env': [{
'key': 'aa',
'value': 'bb'
},],
'env_prefixes': [{
'key': 'PATH',
'value': ['foo/bar', 'second']
},],
'secret_bytes': None,
},
},
),
])
ret = self.main_safe([
'reproduce',
'--swarming',
'https://localhost:1',
'123',
'--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
def test_run(self):
request = {
'name': u'None/pool=default',
'parent_task_id': '',
'priority': 200,
'pool_task_template': 'AUTO',
'request_uuid': REQUEST_UUID,
'task_slices': [{
'expiration_secs':
21600,
'properties':
gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{
'key': 'pool',
'value': 'default'
}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
'wait_for_capacity':
False,
},],
'tags': [],
'user': None,
'realm': None,
'resultdb': {
'enable': False
},
}
result = gen_request_response(request)
def stub_collect(swarming_server, task_ids, timeout, decorate,
print_status_updates, task_summary_json, task_output_dir,
task_output_stdout, include_perf, filepath_filter):
self.assertEqual('https://localhost:1', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
self.assertEqual(25210., timeout)
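      # 25210 = 21600 (expiration_secs) + 3600 (execution_timeout_secs) + 10 from the
      # request defined above.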
self.assertEqual(None, decorate)
self.assertEqual(None, print_status_updates)
self.assertEqual(None, task_summary_json)
self.assertEqual(None, task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
self.assertEqual(None, filepath_filter)
print('Fake output')
return 0
self.mock(swarming, 'collect', stub_collect)
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/tasks/new',
{
'data': request
},
result,
),
])
ret = self.main_safe([
'run',
'--swarming',
'https://localhost:1',
'--dimension',
'pool',
'default',
'--raw-cmd',
'--relative-cwd',
'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
# pylint: disable=no-member
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (ret, actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/pool=default\n'
u'Task: https://localhost:1/task?id=12300\nFake output\n', '')
def test_cancel(self):
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/task/10100/cancel',
{
'data': {
'kill_running': False
},
'method': 'POST'
},
{
'yo': 'dawg'
},
),
])
ret = self.main_safe([
'cancel',
'--swarming',
'https://localhost:1',
'10100',
])
self._check_output('', '')
self.assertEqual(0, ret)
def test_collect_timeout_zero(self):
j = os.path.join(self.tempdir, 'foo.json')
pending = gen_result_response(state='PENDING')
self.expected_requests([
(
'https://localhost:1/_ah/api/swarming/v1/task/10100/result',
{
'retry_50x': True
},
pending,
),
])
self.main_safe([
'collect',
'--swarming',
'https://localhost:1',
'--task-summary-json',
j,
'--timeout',
'-1',
'10100',
])
self._check_output('swarm6: 10100 0\n', '')
with open(j, 'r') as f:
actual = json.load(f)
self.assertEqual({u'shards': [pending]}, actual)
class TestCommandBot(NetTestCase):
# Specialized test fixture for command 'bot'.
def setUp(self):
super(TestCommandBot, self).setUp()
# Sample data retrieved from actual server.
self.now = six.text_type(
datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
self.bot_1 = {
u'bot_id': u'swarm1',
u'created_ts': self.now,
u'dimensions': [
{
u'key': u'cores',
u'value': [u'8']
},
{
u'key': u'cpu',
u'value': [u'x86', u'x86-64']
},
{
u'key': u'gpu',
u'value': []
},
{
u'key': u'id',
u'value': [u'swarm1']
},
{
u'key': u'os',
u'value': [u'Ubuntu', u'Ubuntu-12.04']
},
],
u'external_ip': u'1.1.1.1',
u'hostname': u'swarm1.example.com',
u'internal_ip': u'192.168.0.1',
u'is_dead': True,
u'last_seen_ts': 'A long time ago',
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_2 = {
u'bot_id': u'swarm2',
u'created_ts': self.now,
u'dimensions': [
{
u'key': u'cores',
u'value': [u'8']
},
{
u'key': u'cpu',
u'value': [u'x86', u'x86-64']
},
{
u'key':
u'gpu',
u'value': [
u'15ad',
u'15ad:0405',
u'VMware Virtual SVGA 3D Graphics Adapter',
]
},
{
u'key': u'id',
u'value': [u'swarm2']
},
{
u'key': u'os',
u'value': [u'Windows', u'Windows-6.1']
},
],
u'external_ip': u'1.1.1.2',
u'hostname': u'swarm2.example.com',
u'internal_ip': u'192.168.0.2',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_3 = {
u'bot_id': u'swarm3',
u'created_ts': self.now,
u'dimensions': [
{
u'key': u'cores',
u'value': [u'4']
},
{
u'key': u'cpu',
u'value': [u'x86', u'x86-64']
},
{
u'key': u'gpu',
u'value': [u'15ad', u'15ad:0405']
},
{
u'key': u'id',
u'value': [u'swarm3']
},
{
u'key': u'os',
u'value': [u'Mac', u'Mac-10.9']
},
],
u'external_ip': u'1.1.1.3',
u'hostname': u'swarm3.example.com',
u'internal_ip': u'192.168.0.3',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'148569b73a89501',
u'task_name': u'browser_tests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_4 = {
u'bot_id': u'swarm4',
u'created_ts': self.now,
u'dimensions': [
{
u'key': u'cores',
u'value': [u'8']
},
{
u'key': u'cpu',
u'value': [u'x86', u'x86-64']
},
{
u'key': u'gpu',
u'value': []
},
{
u'key': u'id',
u'value': [u'swarm4']
},
{
u'key': u'os',
u'value': [u'Ubuntu', u'Ubuntu-12.04']
},
],
u'external_ip': u'1.1.1.4',
u'hostname': u'swarm4.example.com',
u'internal_ip': u'192.168.0.4',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'14856971a64c601',
u'task_name': u'base_unittests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
def mock_swarming_api(self, bots, cursor):
"""Returns fake /_ah/api/swarming/v1/bots/list data."""
# Sample data retrieved from actual server.
return {
u'items': bots,
u'cursor': cursor,
u'death_timeout': 1800.0,
u'limit': 4,
u'now': six.text_type(self.now),
}
def test_bots(self):
base_url = 'https://localhost:1/_ah/api/swarming/v1/bots/list?'
self.expected_requests([
(
base_url + 'is_dead=FALSE&is_busy=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(['bots', '--swarming', 'https://localhost:1'])
expected = (
u'swarm2\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": '
'["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics Adapter"], '
'"id": ["swarm2"], "os": ["Windows", "Windows-6.1"]}\n'
'swarm3\n'
' {"cores": ["4"], "cpu": ["x86", "x86-64"], "gpu": ["15ad", '
'"15ad:0405"], "id": ["swarm3"], "os": ["Mac", "Mac-10.9"]}\n'
u' task: 148569b73a89501\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_bare(self):
base_url = 'https://localhost:1/_ah/api/swarming/v1/bots/list?'
self.expected_requests([
(
base_url + 'is_dead=FALSE&is_busy=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(
['bots', '--swarming', 'https://localhost:1', '--bare'])
self._check_output("swarm2\nswarm3\nswarm4\n", '')
self.assertEqual(0, ret)
def test_bots_filter(self):
base_url = 'https://localhost:1/_ah/api/swarming/v1/bots/list?'
self.expected_requests([
(
base_url + 'is_dead=FALSE&is_busy=TRUE&dimensions=os%3AWindows',
{},
self.mock_swarming_api([self.bot_2], None),
),
])
ret = self.main_safe([
'bots',
'--swarming',
'https://localhost:1',
'--busy',
'--dimension',
'os',
'Windows',
])
expected = (u'swarm2\n {"cores": ["8"], "cpu": ["x86", "x86-64"], '
'"gpu": ["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics '
'Adapter"], "id": ["swarm2"], '
'"os": ["Windows", "Windows-6.1"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_keep_dead(self):
base_url = 'https://localhost:1/_ah/api/swarming/v1/bots/list?'
self.expected_requests([
(
base_url + 'is_dead=NONE&is_busy=NONE',
{},
self.mock_swarming_api([self.bot_1, self.bot_4], None),
),
])
ret = self.main_safe([
'bots',
'--swarming',
'https://localhost:1',
'--keep-dead',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_dead_only(self):
base_url = 'https://localhost:1/_ah/api/swarming/v1/bots/list?'
self.expected_requests([
(
base_url + 'is_dead=TRUE&is_busy=NONE&dimensions=os%3AUbuntu',
{},
self.mock_swarming_api([self.bot_1], None),
),
])
ret = self.main_safe([
'bots',
'--swarming',
'https://localhost:1',
'--dimension',
'os',
'Ubuntu',
'--dead-only',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
if __name__ == '__main__':
for env_var_to_remove in ('ISOLATE_SERVER', 'SWARMING_TASK_ID',
'SWARMING_SERVER'):
os.environ.pop(env_var_to_remove, None)
test_env.main()
| apache-2.0 | -4,648,114,395,399,296,000 | 30.19317 | 80 | 0.437081 | false |
repotvsupertuga/tvsupertuga.repository | script.module.resolveurl/lib/resolveurl/plugins/sapo.py | 3 | 1664 | """
resolveurl XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class SapoResolver(ResolveUrl):
name = "sapo"
domains = ["videos.sapo.pt"]
    pattern = r'(?://|\.)(videos\.sapo\.pt)/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
if html:
try:
video_link = re.search(r'data-video-link=[\"\'](.+?)[\"\']', html).groups()[0]
if video_link.startswith('//'): video_link = 'http:%s' % video_link
return video_link
except:
raise ResolverError('No playable video found.')
else:
raise ResolverError('No playable video found.')
def get_url(self, host, media_id):
return 'http://%s/%s' % (host, media_id)
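    # Illustrative usage (hypothetical media id, not from the original source):
    #     SapoResolver().get_media_url('videos.sapo.pt', 'AbC123xyz')
    # fetches http://videos.sapo.pt/AbC123xyz and returns the URL exposed in the
    # page's data-video-link attribute, raising ResolverError if none is found.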
| gpl-2.0 | -301,775,066,573,403,600 | 32.959184 | 94 | 0.628005 | false |
CFIS-Octarine/octarine | src/daomop/viewer.py | 1 | 42162 | import os
import re
import logging
from multiprocessing.dummy import Pool, Lock
from math import atan2, degrees
from multiprocessing.pool import ApplyResult
from astropy.time import Time
from cadcutils.exceptions import NotFoundException
from mp_ephem import ObsRecord
import tempfile
from . import candidate
from . import downloader
from . import storage
from ginga import AstroImage
from ginga.web.pgw import ipg, Widgets, Viewers
from ginga.misc import log
from astropy.wcs import WCS
DISPLAY_KEYWORDS = ['EXPNUM', 'DATE-OBS', 'UTC-OBS', 'EXPTIME', 'FILTER']
LEGEND = 'Keyboard Shortcuts: \n' \
'f: image backwards \n' \
'g: image forwards \n' \
'q: pan mode \n(click and drag on canvas)\n' \
't: contrast mode \n(right click on canvas after pressing "t" to reset contrast)\n' \
'esc: reset keyboard mode\n'
ACCEPTED_DIRECTORY = 'accepted'
PROCESSES = 5
class ConsoleBoxStream(object):
"""
A class that writes to a console box as a stream.
"""
def __init__(self, console_box):
self.console_box = console_box
def write(self, content):
"""
:param content: content to write to the console stream.
:return:
"""
self.console_box.append_text(content)
def flush(self):
pass
class ValidateGui(ipg.EnhancedCanvasView):
def __init__(self, logger, window, bindings=None):
"""
:param logger: a logger object to send messages to
:type logger: logging.Logger
:param window: The main window of the application
:param bindings: Any bindings previously set on this window.
"""
super(ValidateGui, self).__init__(logger=logger, bindings=bindings)
self.console_box = Widgets.TextArea(editable=False)
self.downloader = downloader.Downloader()
self.pool = Pool(processes=PROCESSES)
self.lock = Lock()
self.image_list = {}
self.astro_images = {}
self.logger = logger
console_handler = logging.StreamHandler(stream=ConsoleBoxStream(self.console_box))
console_handler.formatter = logging.Formatter(fmt="%(message)s")
self.logger.addHandler(console_handler)
self.top = window
self.enable_autocuts('on')
self.set_autocut_params('zscale')
# creating drawing canvas; initializing polygon types
self.canvas = self.add_canvas()
self.circle = self.canvas.get_draw_class('circle')
# creating key-press event handling
self.canvas.add_callback('key-press', self._key_press, 'key', self)
# remove callbacks for clicking on the canvas (which is over the viewer)
self.canvas.delete_callback('cursor-down')
self.canvas.delete_callback('cursor-up')
self.obs_number = 0
self.candidate = None
self.candidates = None
self.zoom = None
self._center = None
self.healpix = None
self.storage_list = None
self.override = None
self.qrun_id = None
self.length_check = False
# GUI elements
self.pixel_base = 1.0
self.readout = Widgets.Label("")
self.header_box = Widgets.TextArea(editable=False)
self.accept = Widgets.Button("Accept")
self.reject = Widgets.Button("Reject")
self.next_set = Widgets.Button("Next Set >")
self.previous_set = Widgets.Button("< Previous Set")
self.load_json = Widgets.Button("Load")
self.clear_button = Widgets.Button("Clear")
self.yes_button = Widgets.Button("Yes")
self.no_button = Widgets.Button("No")
self.reload_button = Widgets.Button("Reload")
self.warning = Widgets.Label("In case you try to reject a previously accepted candidate: ")
self.legend = Widgets.TextArea(wrap=True)
self.legend.set_text(LEGEND)
self.build_gui(self.top)
self.comparison_images = {}
self.null_observation = {}
self.next_image = None
def build_gui(self, container):
"""
Building the GUI to be displayed in an HTML5 canvas.
Tested and working in Mozilla Firefox and Google Chrome web browsers.
:param container: ginga.web.pgw.Widgets.TopLevel object
"""
bindings = self.get_bindings()
bindings.enable_all(True)
# keyboard mode indicator, upper right corner
self.show_mode_indicator(True, corner='ur')
viewer_vbox = Widgets.VBox() # box containing the viewer
viewer_vbox.set_border_width(2)
viewer_vbox.set_spacing(1)
viewer_widget = Viewers.GingaViewerWidget(viewer=self)
viewer_vbox.add_widget(viewer_widget, stretch=1)
viewer_vbox.add_widget(self.readout, stretch=0) # text directly below the viewer for coordinate display
self.set_callback('cursor-changed', self.motion_cb)
healpix_set = Widgets.TextEntrySet()
healpix_set.add_callback('activated', lambda x: self.set_healpix(event=x))
healpix_set.set_length(6)
candidate_override = Widgets.TextEntrySet()
candidate_override.add_callback('activated', lambda x: self.override_set(event=x))
candidate_override.set_length(10)
astfile = Widgets.TextEntry(editable=True)
astfile.add_callback('activated', lambda x: self.load_astfile(event=x))
catalog = Widgets.TextEntrySet(text='17AQ03')
catalog.add_callback('activated', lambda x: self.set_qrun_id(x))
catalog.set_length(5)
self.accept.add_callback('activated', lambda x: self.accept_reject())
self.reject.add_callback('activated', lambda x: self.accept_reject(rejected=True))
self.load_json.add_callback('activated', lambda x: self.load_candidates())
self.next_set.add_callback('activated', lambda x: self.next())
self.previous_set.add_callback('activated', lambda x: self.previous())
self.clear_button.add_callback('activated', lambda x: self.clear_viewer())
self.reload_button.add_callback('activated', lambda x: self.reload_candidates())
# accept/reject/next buttons
buttons_hbox = Widgets.HBox()
buttons_hbox.add_widget(self.previous_set)
buttons_hbox.add_widget(self.accept)
buttons_hbox.add_widget(self.reject)
buttons_hbox.add_widget(self.next_set)
buttons_hbox.add_widget(self.load_json)
buttons_hbox.add_widget(self.reload_button)
buttons_hbox.add_widget(self.clear_button)
self.load_json.set_enabled(False)
buttons_hbox.set_spacing(3)
# catalog directory text box
catalog_box = Widgets.HBox()
catalog_label = Widgets.Label(text="Set QRUNID:", style='color:red')
catalog_box.add_widget(catalog_label)
catalog_box.add_widget(catalog)
catalog_box.set_margins(15, 0, 10, 0) # top, right, bottom, left
candidates_hbox = Widgets.HBox()
candidate_label = Widgets.Label(text="(Optional) Enter candidate set (HEALPIX): ")
candidates_hbox.add_widget(candidate_label)
candidates_hbox.add_widget(healpix_set)
candidates_hbox.set_margins(15, 0, 15, 0) # top, right, bottom, left
override_hbox = Widgets.HBox()
override_label = Widgets.Label(text="(Optional) Override provisional name: ")
override_hbox.add_widget(override_label)
override_hbox.add_widget(candidate_override)
override_hbox.set_margins(0, 0, 15, 0) # top, right, bottom, left
astfile_hbox = Widgets.HBox()
astfile_hbox_label = Widgets.Label(text="Paste AST file here:")
astfile_hbox.add_widget(astfile_hbox_label)
astfile_hbox.add_widget(astfile)
# button and text entry vbox
buttons_vbox = Widgets.VBox()
buttons_vbox.add_widget(buttons_hbox)
buttons_vbox.add_widget(catalog_box)
buttons_vbox.add_widget(candidates_hbox)
buttons_vbox.add_widget(override_hbox)
buttons_vbox.add_widget(astfile_hbox)
viewer_vbox.add_widget(buttons_vbox) # add buttons below the viewer
viewer_header_hbox = Widgets.HBox() # box containing the viewer/buttons and rightmost text area
viewer_header_hbox.add_widget(viewer_vbox)
viewer_header_hbox.add_widget(Widgets.Label(''))
hbox = Widgets.HBox()
hbox.add_widget(self.header_box)
hbox.add_widget(self.legend)
viewer_header_hbox.add_widget(hbox)
full_vbox = Widgets.VBox() # vbox container for all elements
full_vbox.add_widget(viewer_header_hbox)
full_vbox.add_widget(self.console_box)
self.console_box.set_text('Logging output:\n')
self.header_box.set_text("Header:")
container.set_widget(full_vbox)
container.set_widget(self.warning)
container.set_widget(self.yes_button)
container.set_widget(self.no_button)
self.yes_button.set_enabled(False)
self.no_button.set_enabled(False)
self.buttons_off()
def next(self):
"""
Load the next set of images into the viewer
"""
if self.candidates is not None:
# noinspection PyBroadException
try:
self.buttons_off()
self.obs_number = 0
self.next_image = None
self.clear_candidate_images()
self.candidate = self.candidates.next()
# finding next candidate to load depending on which .ast files are written
while self.ast_exists():
self.candidate = self.candidates.next()
self.logger.info("Loading {}...".format(self.candidate[0].provisional_name))
self.load()
self.buttons_on()
except Exception as ex:
self.logger.info('Loading next candidate set failed.')
if isinstance(ex, StopIteration):
self.logger.info('StopIteration error: End of candidate set.')
self.load_candidates()
# self.logger.info('Hit "Load" button to move onto the next set.')
# self.previous_set.set_enabled(True)
# self.load_json.set_enabled(True)
def ast_exists(self):
"""
Checks if candidate has already been examined with a file written on VOSpace.
length_check is necessary because it means the sub directory exists, if it doesn't an error will be
thrown when looking in the directory list.
        :return: True if the .ast file exists and there is no viewing override, False otherwise
"""
if self.length_check and self.candidate[0].provisional_name + '.ast' in storage.listdir(
os.path.join(os.path.dirname(storage.DBIMAGES), storage.CATALOG, self.qrun_id,
self.candidates.catalog.catalog.dataset_name), force=True):
if self.override == self.candidate[0].provisional_name:
self.logger.info("Candidate {} being overridden for viewing."
.format(self.candidate[0].provisional_name))
else:
self.logger.info("Candidate {} has been investigated.".format(self.candidate[0].provisional_name))
return True
return False
def previous(self):
"""
Load the previous set of images into the viewer
"""
if self.candidates is not None:
self.buttons_off()
self.obs_number = 0
self.candidate = self.candidates.previous()
if self.candidate is not None:
self.load()
self.buttons_on()
def accept_reject(self, rejected=False):
"""
Accept or reject current observation depending on button press. Write to file and load next set into the viewer
:param rejected: whether the candidate set has been accepted or rejected
"""
self.logger.info("Rejected.") if rejected else self.logger.info("Accepted.")
self.buttons_off()
if self.candidates is not None:
self.write_record(rejected=rejected)
try:
self.next()
except StopIteration:
pass
def set_qrun_id(self, qrun_id):
"""
:param qrun_id: QRUNID in a header file
"""
if hasattr(qrun_id, 'text'):
self.qrun_id = str(qrun_id.text).strip(' ')
self.storage_list = storage.listdir(os.path.join(os.path.dirname(storage.DBIMAGES),
storage.CATALOG,
self.qrun_id), force=True)
self.load_json.set_enabled(True)
self.logger.info("QRUNID set to {}.".format(self.qrun_id))
def lookup(self):
"""
        Determines which healpix value is to be examined next. The healpix value is eventually used when creating the
candidate set.
:return healpix value for the candidate files; 0 if no candidate files have been found
"""
count = 0
self.length_check = False
for filename in self.storage_list:
# ex: filename = HPX_00887_RA_203.6_DEC_+58.9_bk.json,
# filename[:-len(storage.MOVING_TARGET_VERSION)] = HPX_00887_RA_203.6_DEC_+58.9
# sub_directory will be the directory where a candidate's .ast files are written
sub_directory = filename[:-len(storage.MOVING_TARGET_VERSION + '.json')]
count += 1
# if the file extension is in the filename, then it is a file containing candidate information
if storage.MOVING_TARGET_VERSION in filename:
                x = re.match(r'(?P<hpx>HPX_)(?P<healpix>\d{5})(?P<leftover>_.*)', filename)
if self.healpix is not None and int(x.group('healpix')) < self.healpix:
continue # skipping over json files until the specified catalog has been reached
# if the sub directory exists, we will have to check that all the candidates have been investigated
elif sub_directory in self.storage_list:
self.length_check = True
# TODO: go back to server for storage_list in case two people are actively writing from unique servers
# cutting down the storage list for further iterating
self.storage_list = self.storage_list[count:]
return int(x.group('healpix'))
return 0
def set_healpix(self, event):
"""
Sets the healpix for the current Candidate set.
:param event: healpix value
"""
if hasattr(event, 'text'):
self.healpix = int(event.text)
self.logger.info("Set healpix as {}".format(self.healpix))
if self.qrun_id is not None:
self.load_json.set_enabled(True)
def load_astfile(self, event):
self.candidate = []
for line in event.text.split('\n'):
# noinspection PyBroadException
try:
obs_record = ObsRecord.from_string(line)
if obs_record is not None:
self.candidate.append(obs_record)
except:
self.logger.warning("Failed to parse line >{}<".format(line))
return
self.logger.info("Accepted AST file.")
self.candidates = [self.candidate]
self.next_set.set_enabled(False)
self.previous_set.set_enabled(False)
self._download_obs_records(self.candidate)
self.load(0)
def override_set(self, event):
"""
Look at the cutout even if it has already been investigated. Primarily used for double checking
accepted candidates.
"""
if hasattr(event, 'text'):
self.override = str(event.text).strip(' ')
self.logger.info("Will override {}.".format(self.override))
def load_candidates(self, healpix=None):
"""
Initial candidates loaded into the viewer. Starts up a thread pool to download images simultaneously.
:param healpix: Catalogue number containing dataset
"""
if healpix is None:
self.healpix = self.lookup()
self.buttons_off()
while self.healpix != 0 and self.set_examined():
self.healpix = self.lookup()
if self.healpix == 0: # base case (when there are no more open candidate sets in the VOSpace directory)
self.logger.info("No more candidate sets for this QRUNID.")
raise StopIteration
self.logger.warning("Launching image prefetching. Please be patient.")
with self.lock:
for obs_records in self.candidates:
self._download_obs_records(obs_records)
self.candidates = candidate.CandidateSet(self.healpix, catalog_dir=self.qrun_id)
self.candidate = None # reset on candidate to clear it of any leftover from previous sets
self.load()
def _download_obs_records(self, record):
"""
Download the observations associated with the current self.candidate set of obsRecords.
:return:
"""
previous_record = None
previous_offset = 2 * storage.CUTOUT_RADIUS
offset = previous_offset
for obs_record in record:
assert isinstance(obs_record, ObsRecord)
key = self.downloader.image_key(obs_record)
if key not in self.image_list:
self.image_list[key] = self.pool.apply_async(self.downloader.get, (obs_record,))
# Check if we should load a comparison for the previous image.
if previous_record is not None:
offset = obs_record.coordinate.separation(previous_record.coordinate)
if offset > storage.CUTOUT_RADIUS and previous_offset > storage.CUTOUT_RADIUS:
# Insert a blank image in the list
previous_key = self.downloader.image_key(previous_record)
try:
comparison = storage.get_comparison_image(previous_record.coordinate,
previous_record.date.mjd,
radius=120/3600.)
frame = "{}{}".format(comparison[0]['observationID'], previous_record.comment.frame)
comparison_obs_record = ObsRecord(null_observation=True,
provisional_name=previous_record.provisional_name,
date=Time(comparison[0]['mjdate'], format='mjd',
precision=5).mpc,
ra=previous_record.coordinate.ra.degree,
dec=previous_record.coordinate.dec.degree,
frame=frame,
comment=previous_key)
key = self.downloader.image_key(comparison_obs_record)
self.null_observation[key] = comparison_obs_record
self.comparison_images[previous_key] = key
if key not in self.image_list:
self.image_list[key] = self.pool.apply_async(self.downloader.get,
(comparison_obs_record,))
except Exception as ex:
self.logger.error("Failed to get comparison image.: {}".format(str(ex)))
return
previous_record = obs_record
previous_offset = offset
# Check if the offset between the last record and the one just before it was large.
if previous_offset > storage.CUTOUT_RADIUS and previous_record is not None:
previous_key = self.downloader.image_key(previous_record)
comparison = storage.get_comparison_image(previous_record.coordinate,
previous_record.date.mjd,
radius=120/3600.0)
frame = "{}{}".format(comparison[0]['observationID'], 'p00')
comparison_obs_record = ObsRecord(null_observation=True,
provisional_name=previous_record.provisional_name,
date=Time(comparison[0]['mjdate'], format='mjd',
precision=5).mpc,
ra=previous_record.coordinate.ra.degree,
dec=previous_record.coordinate.dec.degree,
frame=frame,
comment=previous_key)
key = self.downloader.image_key(comparison_obs_record)
self.null_observation[key] = comparison_obs_record
self.comparison_images[previous_key] = key
if key not in self.image_list:
self.image_list[key] = self.pool.apply_async(self.downloader.get,
(comparison_obs_record,))
def set_examined(self):
"""
Checks if the current json file has been fully examined or not
:return True if the directory is fully examined and there's no override, False if it has not been examined.
"""
self.logger.info("Accepted candidate entry: {}".format(self.healpix))
try:
self.candidates = candidate.CandidateSet(self.healpix, catalog_dir=self.qrun_id)
if self.length_check:
sub_directory = storage.listdir(os.path.join(os.path.dirname(storage.DBIMAGES),
storage.CATALOG,
self.qrun_id,
self.candidates.catalog.catalog.dataset_name), force=True)
if self.override is not None:
filename = self.override+'.ast'
if filename in sub_directory:
self.logger.info("Overriding {}.".format(filename))
return False
else:
count = 0
# counting the total amount of candidates that are in self.candidates
for _ in self.candidates:
count += 1
# re-set self.candidates since the for loop removes all its candidates in a dequeuing fashion
self.candidates = candidate.CandidateSet(self.healpix, catalog_dir=self.qrun_id)
# the amount of files in the accompanying subdirectory for the .json candidate file
directory_length = len(sub_directory)
if count == directory_length:
self.logger.info("Candidate set {} fully examined.".format(self.healpix))
return True
elif count > directory_length:
self.logger.info("Candidate set {} not fully examined.".format(self.healpix))
return False
else:
self.logger.error("Value error: count {} or directory_length {} is out of range."
.format(count, directory_length))
raise ValueError
return False # no length check, therefor no directory has been created and this set isn't examined
except Exception as ex:
self.logger.info("Failed to load candidates: {}".format(str(ex)))
if isinstance(ex, StopIteration):
self.logger.info('StopIteration error. Candidate set might be empty.')
return True # continue with iteration
else:
raise ex
def reload_candidates(self):
"""
Performs a hard reload on all images for the case of loading errors.
Closes current worker pool and reopens a new one.
"""
if self.healpix is not None:
self.logger.info('Reloading all candidates...')
self.pool.terminate()
self.pool = Pool(processes=PROCESSES)
self.buttons_on()
self.set_qrun_id(self.qrun_id)
self.load_candidates(self.healpix)
self.next()
def load(self, obs_number=0):
"""
        With the viewing window already created, creates a FitsImage object and loads its cutout into the window and
displays select header values (see: DISPLAY_KEYWORDS).
Define the center of the first image to be the reference point for aligning the other two images in the set.
:param obs_number: index of which line in the file gets loaded/displayed in the viewer
"""
self._center = None
self.obs_number = obs_number
self._load()
def _load(self):
"""
Loads an image into the viewer, applying appropriate transformations for proper display.
Checks if an HDU has been loaded already and retrieves if needed and then displays that HDU.
Uses multiprocessing techniques for simultaneous downloads and dictionaries to keep track of which images
have been already loaded for faster image switching.
"""
# load the image if not already available, for now we'll put this in here.
if self.candidates is None:
self.logger.info("No candidates loaded.")
return
# loads first candidate
if self.candidate is None:
self.next()
return
key = self.key
while True:
# noinspection PyBroadException
try:
if key not in self.astro_images:
# TODO: MEF
image = AstroImage.AstroImage(logger=self.logger)
image.load_hdu(self.loaded_hdu)
self.astro_images[key] = image
self.set_image(self.astro_images[key])
if self.zoom is not None:
self.zoom_to(self.zoom)
self._rotate()
if self.center is not None:
self._align()
# the image cutout is considered the first object on the canvas, this deletes everything over top of it
self.canvas.delete_objects(self.canvas.get_objects()[1:])
if key not in self.null_observation:
self.mark_aperture()
self.header_box.set_text("Header:" + self.info)
self.logger.info("Loaded: {}".format(self.candidate[self.obs_number].comment.frame))
break
except Exception as ex:
self.logger.info(str(ex))
self.logger.info("Skipping candidate {} due to load failure."
.format(self.candidate[0].provisional_name))
self.next()
break
self._center = WCS(self.header).all_pix2world(self.get_data_size()[0] / 2,
self.get_data_size()[1] / 2, 0)
def mark_aperture(self):
"""
Draws a red circle on the drawing canvas in the viewing window around the celestial object detected.
"""
ra = self.candidate[self.obs_number].coordinate.ra
dec = self.candidate[self.obs_number].coordinate.dec
x, y = WCS(self.header).all_world2pix(ra, dec, 0)
self.canvas.add(self.circle(x, y, radius=10, color='red'))
def write_record(self, rejected=False):
"""
Writing observation lines to a new file.
:param rejected: Whether or not the candidate set contains a valid celestial object
:type rejected: bool
"""
try:
catalog_dir = os.path.join(storage.CATALOG,
self.qrun_id,
self.candidates.catalog.catalog.dataset_name)
art = storage.ASTRecord(self.candidate[0].provisional_name,
version='',
catalog_dir=catalog_dir)
with open(art.filename, 'w+') as fobj:
for ob in self.candidate:
if rejected:
ob.null_observation = True
fobj.write(ob.to_string() + '\n')
self.logger.info("Queuing job to write file to VOSpace.")
with self.lock:
try:
if rejected:
self.remove_check(art)
elif not rejected:
self.pool.apply_async(self.accepted_list, (art,))
self.logger.info(
"Done Queuing {} for VOSpace write.".format(self.candidate[0].provisional_name + ".ast"))
except Exception as ex:
self.logger.info("Failed to write file {}: {}".format(
self.candidate[0].provisional_name, str(ex)))
except IOError as ex:
self.logger.info("Unable to write to file.")
self.logger.info(str(ex))
raise ex
def remove_check(self, art, ext='.ast'):
"""
Checks a file's existence in its /accepted/ VOSpace directory. If the uri can't be found, there must not be
        a file in its accepted directory, so a standard null observation file is uploaded.
Prompts user to make a decision if the file already exists.
:param art: artifact who's being checked for existence
:param ext: file type
"""
# noinspection PyBroadException
try:
accepted_uri = os.path.join(os.path.join(os.path.dirname(storage.DBIMAGES), storage.CATALOG),
self.header['QRUNID'], ACCEPTED_DIRECTORY, art.observation.dataset_name + ext)
if storage.exists(accepted_uri):
self.yes_button.add_callback('activated', lambda x: self.move_accepted(accepted_uri, art))
self.no_button.add_callback('activated', lambda x: self.warning_label_reset())
self.yes_button.set_enabled(True)
self.no_button.set_enabled(True)
self.logger.warning("File already accepted.")
self.warning.set_text("FILE {} HAS ALREADY BEEN ACCEPTED, ARE YOU SURE YOU WANT TO REJECT IT?"
.format(art.observation.dataset_name))
self.warning.set_color(fg='white', bg='red')
except Exception as ex:
if isinstance(ex, NotFoundException):
self.write_rejected(art)
else:
self.logger.error(str(ex))
raise ex
def move_accepted(self, accepted_uri, art):
"""
        Deletes the file at the uri and queues a thread to write the file in a new destination as a rejected
observation. Disables buttons and resets label.
:param accepted_uri: uri of the accepted file
:param art: artifact object for the record being examined
"""
storage.delete(accepted_uri)
self.logger.info("Deleted {}".format(accepted_uri))
self.write_rejected(art)
self.warning_label_reset()
def warning_label_reset(self):
"""
Method that serves as a callback destination. Disables yes/no buttons and resets label text.
"""
self.yes_button.set_enabled(False)
self.no_button.set_enabled(False)
self.warning.set_text("In case you try to reject a previously accepted candidate: ")
self.warning.set_color(fg='black', bg='white')
def write_rejected(self, art):
"""
Start a thread to write the rejected artifact to its uri
:param art: Artifact object
"""
self.pool.apply_async(self.downloader.put, (art,))
self.logger.info("Done Queuing {} for VOSpace write {}".format(self.candidate[0].provisional_name + ".ast",
art.uri))
def accepted_list(self, art, ext='.ast'):
"""
Places accepted .ast file in an accepted folder in its QRUNID section on VOSpace
:param art: Artifact object containing the proper file name
:param ext: file extension
"""
# 'vos:cfis/solar_system/dbimages/catalogs/<QRUNID>/accepted/<dataset_name>.ast
# Since this just uploads an unintuitive name in the directory, perhaps the path could be changed to
# ../accepted/<healpix>/<dataset_name>.ast
destination = os.path.join(os.path.join(os.path.dirname(storage.DBIMAGES), storage.CATALOG),
self.header['QRUNID'], ACCEPTED_DIRECTORY, art.observation.dataset_name + ext)
try:
storage.make_path(destination)
storage.copy(art.filename, destination)
except Exception as ex:
self.logger.info("Failed writing to accepted directory for {}: {}"
.format(art.observation.dataset_name, str(ex)))
raise ex
def _rotate(self):
"""
Rotates the current viewer image to be oriented North up East left. This is done by taking outward vectors from
the origin and using their WCS values to determine the original orientation of the image. Images are then
flipped/rotated accordingly to be North up East left.
"""
wcs = WCS(self.header)
self.transform(False, False, False)
x = wcs.all_pix2world([[0, 0], [1, 1], [1, 0]], 0)
ra1 = x[0][0]
ra2 = x[1][0]
ra3 = x[2][0]
dec1 = x[0][1]
dec2 = x[1][1]
dec3 = x[2][1]
delta_x = ra2 - ra1
delta_y = dec2 - dec1
flip_x = 1
flip_y = 1
if not delta_x < 0:
flip_x = -1
if not delta_y > 0:
flip_y = -1
self.transform(True, True, False) # def transform(self, flip_x, flip_y, swap_xy):
else:
self.transform(True, False, False)
elif not delta_y > 0:
flip_y = -1
self.transform(False, True, False)
delta_delta = (dec3 - dec1) * flip_y
delta_ra = (ra1 - ra3) * flip_x
theta = degrees(atan2(delta_delta, delta_ra))
self.rotate(theta)
def _align(self):
"""
Aligns images via panning so their backgrounds stay consistent. Images requiring a pan greater than 1/2 the
loaded image will be ignored.
"""
x, y = WCS(self.header).all_world2pix(self.center[0], self.center[1], 0)
if not(0 < x < self.get_data_size()[0] and 0 < y < self.get_data_size()[1]):
self.logger.info("Pan out of range: ({}, {}) is greater than half the viewing window.".format(x, y))
else:
self.set_pan(x, y)
def _key_press(self, canvas, keyname, opn, viewer):
"""
        Method called once a keyboard stroke has been detected. Using two un-bound keys, f & g, to cycle different
cutout hdu's from the ObsRecord.
Parameters canvas, opn, and viewer are all needed for the method to be called even though they are not
directly used.
:param canvas: Ginga DrawingCanvas Object
:param keyname: Name of the key that has been pressed
:param opn: str "key"
:param viewer: Ginga EnhancedCanvasView object
"""
self.logger.debug("Got key: {} from canvas: {} with opn: {} from viewer: {}".format(canvas,
keyname,
opn,
viewer))
if self.candidate is None:
self.next()
return
# Only step back if we aren't looking at a comparison images (as determined by the next_image keyword)
if keyname == 'f':
if self.next_image is not None:
self.next_image = None
else:
self.obs_number -= 1
key = self.downloader.image_key(self.candidate[self.obs_number])
if key in self.comparison_images:
self.next_image = self.comparison_images[key]
# Only step forward if this images doesn't have comparison image in the comparison image list.
elif keyname == 'g':
key = self.downloader.image_key(self.candidate[self.obs_number])
if key in self.comparison_images and self.next_image is None:
self.next_image = self.comparison_images[key]
else:
self.next_image = None
self.obs_number += 1
self.zoom = self.get_zoom()
if self.candidate is not None:
self.obs_number %= len(self.candidate)
self._load()
def clear_candidate_images(self):
"""
Clear all the images associated with a candidate.
"""
if self.candidate is None:
return
for obs_record in self.candidate:
key = self.downloader.image_key(obs_record)
if key in self.image_list:
del(self.image_list[key])
if key in self.astro_images:
del(self.astro_images[key])
if key in self.comparison_images:
comp_key = self.comparison_images[key]
if comp_key in self.comparison_images:
del(self.image_list[comp_key])
if comp_key in self.astro_images:
del(self.astro_images[comp_key])
def clear_viewer(self):
"""
        Clear the image in the viewer and any other objects drawn on the canvas.
"""
self.clear()
self.canvas.delete_objects(self.canvas.get_objects())
def buttons_on(self):
"""
Activate most GUI buttons
"""
self.next_set.set_enabled(True)
self.previous_set.set_enabled(True)
self.accept.set_enabled(True)
self.reject.set_enabled(True)
self.clear_button.set_enabled(True)
self.load_json.set_enabled(True)
self.reload_button.set_enabled(True)
def buttons_off(self):
"""
Deactivate some GUI buttons
"""
self.next_set.set_enabled(False)
self.previous_set.set_enabled(False)
self.accept.set_enabled(False)
self.reject.set_enabled(False)
self.clear_button.set_enabled(False)
self.load_json.set_enabled(False)
self.reload_button.set_enabled(False)
@property
def center(self):
"""
Returns the center of the image in ra/dec coordinates
"""
if self._center is not None:
return self._center
@property
def key(self):
if self.next_image is not None:
key = self.next_image
else:
key = self.downloader.image_key(self.candidate[self.obs_number])
return key
@property
def loaded_hdu(self):
"""
Return current HDU
"""
# TODO: MEF
key = self.key
with self.lock:
hdu = (isinstance(self.image_list[key], ApplyResult) and self.image_list[key].get()
or self.image_list[key])
if isinstance(hdu, ApplyResult):
self.logger.info("Loaded HDU is Apply result instance, not an HDU.")
raise TypeError
self.image_list[key] = hdu
load_hdu = max_size = None
for hdu in self.image_list[key]:
if hdu.header['NAXIS'] == 0:
continue
size = hdu.header['NAXIS1'] * hdu.header['NAXIS2']
if max_size is None or size > max_size:
max_size = size
load_hdu = hdu
return load_hdu
@property
def header(self):
"""
Return current HDU's header
"""
return self.astro_images[self.key].get_header()
@property
def info(self):
return "\n".join([x + " = " + str(self.header.get(x, "UNKNOWN")) for x in DISPLAY_KEYWORDS])
def main(params):
ginga_logger = log.get_logger("ginga", options=params)
ginga_logger.addHandler(logging.FileHandler(filename=tempfile.NamedTemporaryFile(prefix='ginga',
delete=False).name))
if params.use_opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as ex:
ginga_logger.warning("Error using OpenCL: {}".format(ex))
if params.use_opencl:
from ginga import trcalc
try:
trcalc.use('opencl')
except Exception as ex:
ginga_logger.warning("Error using OpenCL: {}".format(ex))
app = Widgets.Application(logger=ginga_logger, host=params.host, port=params.port)
# create top level window
window = app.make_window("Validate", wid='Validate')
daomop_logger = logging.getLogger('daomop')
if hasattr(params, 'loglevel'):
daomop_logger.setLevel(params.loglevel)
# our own viewer object, customized with methods (see above)
ValidateGui(daomop_logger, window)
try:
app.start()
except KeyboardInterrupt:
ginga_logger.info("Terminating viewer...")
window.close()
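# A minimal sketch (not part of the original module) of how this entry point might be
# wired up from the command line. It assumes an argparse namespace exposing the
# attributes referenced above (host, port, use_opencv, use_opencl, loglevel); the real
# project may construct these options differently, so this is illustrative only.
#
# if __name__ == '__main__':
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--host', default='localhost')
#     parser.add_argument('--port', type=int, default=9914)
#     parser.add_argument('--use_opencv', action='store_true')
#     parser.add_argument('--use_opencl', action='store_true')
#     parser.add_argument('--loglevel', type=int, default=20)
#     main(parser.parse_args())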
| gpl-3.0 | -3,999,884,930,713,838,000 | 41.246493 | 119 | 0.568284 | false |
bratsche/Neutron-Drive | google_appengine/lib/django_1_3/tests/modeltests/model_forms/models.py | 49 | 54171 | """
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
import os
import tempfile
from django.db import models
from django.core.files.storage import FileSystemStorage
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __unicode__(self):
return self.name
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self):
import datetime
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save()
def __unicode__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, primary_key=True)
age = models.PositiveIntegerField()
def __unicode__(self):
return "%s is %s" % (self.writer, self.age)
from django.contrib.localflavor.us.models import PhoneNumberField
class PhoneNumber(models.Model):
phone = PhoneNumberField()
description = models.CharField(max_length=20)
def __unicode__(self):
return self.phone
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __unicode__(self):
return self.description
try:
# If PIL is available, try testing ImageFields. Checking for the existence
# of Image is enough for CPython, but for PyPy, you need to check for the
# underlying modules. If PIL is not available, ImageField tests are omitted.
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image, _imaging
except ImportError:
import Image, _imaging
test_images = True
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __unicode__(self):
return self.description
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __unicode__(self):
return self.description
except ImportError:
test_images = False
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __unicode__(self):
return self.field
class Product(models.Model):
slug = models.SlugField(unique=True)
def __unicode__(self):
return self.slug
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __unicode__(self):
return u"%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __unicode__(self):
return self.key
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __unicode__(self):
return self.title
class DerivedPost(Post):
pass
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __unicode__(self):
return unicode(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super(MarkupField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
# don't allow this field to be used in a form (a real use-case might be
# that you know the markup will always be X, but it is among an app
# that allows the user to say it could be something else)
# regressed at r10062
return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
__test__ = {'API_TESTS': """
>>> from django import forms
>>> from django.forms.models import ModelForm, model_to_dict
>>> from django.core.files.uploadedfile import SimpleUploadedFile
The bare bones, absolutely nothing custom, basic case.
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> CategoryForm.base_fields.keys()
['name', 'slug', 'url']
Extra fields.
>>> class CategoryForm(ModelForm):
... some_extra_field = forms.BooleanField()
...
... class Meta:
... model = Category
>>> CategoryForm.base_fields.keys()
['name', 'slug', 'url', 'some_extra_field']
Extra field that has a name collision with a related object accessor.
>>> class WriterForm(ModelForm):
... book = forms.CharField(required=False)
...
... class Meta:
... model = Writer
>>> wf = WriterForm({'name': 'Richard Lockridge'})
>>> wf.is_valid()
True
Replacing a field.
>>> class CategoryForm(ModelForm):
... url = forms.BooleanField()
...
... class Meta:
... model = Category
>>> CategoryForm.base_fields['url'].__class__
<class 'django.forms.fields.BooleanField'>
Using 'fields'.
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... fields = ['url']
>>> CategoryForm.base_fields.keys()
['url']
Using 'exclude'
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... exclude = ['url']
>>> CategoryForm.base_fields.keys()
['name', 'slug']
Using 'fields' *and* 'exclude'. Not sure why you'd want to do this, but uh,
"be liberal in what you accept" and all.
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... fields = ['name', 'url']
... exclude = ['url']
>>> CategoryForm.base_fields.keys()
['name']
Using 'widgets'
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... fields = ['name', 'url', 'slug']
... widgets = {
... 'name': forms.Textarea,
... 'url': forms.TextInput(attrs={'class': 'url'})
... }
>>> str(CategoryForm()['name'])
'<textarea id="id_name" rows="10" cols="40" name="name"></textarea>'
>>> str(CategoryForm()['url'])
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />'
>>> str(CategoryForm()['slug'])
'<input id="id_slug" type="text" name="slug" maxlength="20" />'
Don't allow more than one 'model' definition in the inheritance hierarchy.
Technically, it would generate a valid form, but the fact that the resulting
save method won't deal with multiple objects is likely to trip up people not
familiar with the mechanics.
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> class OddForm(CategoryForm):
... class Meta:
... model = Article
OddForm is now an Article-related thing, because OddForm.Meta overrides
CategoryForm.Meta.
>>> OddForm.base_fields.keys()
['headline', 'slug', 'pub_date', 'writer', 'article', 'status', 'categories']
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
First class with a Meta class wins.
>>> class BadForm(ArticleForm, CategoryForm):
... pass
>>> BadForm.base_fields.keys()
['headline', 'slug', 'pub_date', 'writer', 'article', 'status', 'categories']
Subclassing without specifying a Meta on the class will use the parent's Meta
(or the first parent in the MRO if there are multiple parent classes).
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> class SubCategoryForm(CategoryForm):
... pass
>>> SubCategoryForm.base_fields.keys()
['name', 'slug', 'url']
We can also subclass the Meta inner class to change the fields list.
>>> class CategoryForm(ModelForm):
... checkbox = forms.BooleanField()
...
... class Meta:
... model = Category
>>> class SubCategoryForm(CategoryForm):
... class Meta(CategoryForm.Meta):
... exclude = ['url']
>>> print SubCategoryForm()
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>
# test using fields to provide ordering to the fields
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
... fields = ['url', 'name']
>>> CategoryForm.base_fields.keys()
['url', 'name']
>>> print CategoryForm()
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
... fields = ['slug', 'url', 'name']
... exclude = ['url']
>>> CategoryForm.base_fields.keys()
['slug', 'name']
# Old form_for_x tests #######################################################
>>> from django.forms import ModelForm, CharField
>>> import datetime
>>> Category.objects.all()
[]
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> f = CategoryForm()
>>> print f
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
>>> print f.as_ul()
<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>
>>> print f['name']
<input id="id_name" type="text" name="name" maxlength="20" />
>>> f = CategoryForm(auto_id=False)
>>> print f.as_ul()
<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>
>>> f = CategoryForm({'name': 'Entertainment', 'slug': 'entertainment', 'url': 'entertainment'})
>>> f.is_valid()
True
>>> f.cleaned_data['url']
u'entertainment'
>>> f.cleaned_data['name']
u'Entertainment'
>>> f.cleaned_data['slug']
u'entertainment'
>>> c1 = f.save()
>>> c1
<Category: Entertainment>
>>> Category.objects.all()
[<Category: Entertainment>]
>>> f = CategoryForm({'name': "It's a test", 'slug': 'its-test', 'url': 'test'})
>>> f.is_valid()
True
>>> f.cleaned_data['url']
u'test'
>>> f.cleaned_data['name']
u"It's a test"
>>> f.cleaned_data['slug']
u'its-test'
>>> c2 = f.save()
>>> c2
<Category: It's a test>
>>> Category.objects.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
If you call save() with commit=False, then it will return an object that
hasn't yet been saved to the database. In this case, it's up to you to call
save() on the resulting model instance.
>>> f = CategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
>>> f.is_valid()
True
>>> f.cleaned_data['url']
u'third'
>>> f.cleaned_data['name']
u'Third test'
>>> f.cleaned_data['slug']
u'third-test'
>>> c3 = f.save(commit=False)
>>> c3
<Category: Third test>
>>> Category.objects.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
>>> c3.save()
>>> Category.objects.order_by('name')
[<Category: Entertainment>, <Category: It's a test>, <Category: Third test>]
If you call save() with invalid data, you'll get a ValueError.
>>> f = CategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
>>> f.errors['name']
[u'This field is required.']
>>> f.errors['slug']
[u"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]
>>> f.cleaned_data
Traceback (most recent call last):
...
AttributeError: 'CategoryForm' object has no attribute 'cleaned_data'
>>> f.save()
Traceback (most recent call last):
...
ValueError: The Category could not be created because the data didn't validate.
>>> f = CategoryForm({'name': '', 'slug': '', 'url': 'foo'})
>>> f.save()
Traceback (most recent call last):
...
ValueError: The Category could not be created because the data didn't validate.
Create a couple of Writers.
>>> w_royko = Writer(name='Mike Royko')
>>> w_royko.save()
>>> w_woodward = Writer(name='Bob Woodward')
>>> w_woodward.save()
ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
fields with the 'choices' attribute are represented by a ChoiceField.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm(auto_id=False)
>>> print f
<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Bob Woodward</option>
<option value="...">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="...">Entertainment</option>
<option value="...">It's a test</option>
<option value="...">Third test</option>
</select><br /><span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></td></tr>
You can restrict a form to a subset of the complete list of fields
by providing a 'fields' argument. If you try to save a
model created with such a form, you need to ensure that the fields
that are _not_ on the form have default values, or are allowed to have
a value of None. If a field isn't specified on a form, the object created
from the form can't provide a value for that field!
>>> class PartialArticleForm(ModelForm):
... class Meta:
... model = Article
... fields = ('headline','pub_date')
>>> f = PartialArticleForm(auto_id=False)
>>> print f
<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
When the ModelForm is passed an instance, that instance's current values are
inserted as 'initial' data in each Field.
>>> w = Writer.objects.get(name='Mike Royko')
>>> class RoykoForm(ModelForm):
... class Meta:
... model = Writer
>>> f = RoykoForm(auto_id=False, instance=w)
>>> print f
<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br /><span class="helptext">Use both first and last names.</span></td></tr>
>>> art = Article(headline='Test article', slug='test-article', pub_date=datetime.date(1988, 1, 4), writer=w, article='Hello.')
>>> art.save()
>>> art_id_1 = art.id
>>> art_id_1 is not None
True
>>> class TestArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = TestArticleForm(auto_id=False, instance=art)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="...">Bob Woodward</option>
<option value="..." selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="...">Entertainment</option>
<option value="...">It's a test</option>
<option value="...">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
>>> f = TestArticleForm({'headline': u'Test headline', 'slug': 'test-headline', 'pub_date': u'1984-02-06', 'writer': unicode(w_royko.pk), 'article': 'Hello.'}, instance=art)
>>> f.errors
{}
>>> f.is_valid()
True
>>> test_art = f.save()
>>> test_art.id == art_id_1
True
>>> test_art = Article.objects.get(id=art_id_1)
>>> test_art.headline
u'Test headline'
You can create a form over a subset of the available fields
by specifying a 'fields' argument to form_for_instance.
>>> class PartialArticleForm(ModelForm):
... class Meta:
... model = Article
... fields=('headline', 'slug', 'pub_date')
>>> f = PartialArticleForm({'headline': u'New headline', 'slug': 'new-headline', 'pub_date': u'1988-01-04'}, auto_id=False, instance=art)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
>>> f.is_valid()
True
>>> new_art = f.save()
>>> new_art.id == art_id_1
True
>>> new_art = Article.objects.get(id=art_id_1)
>>> new_art.headline
u'New headline'
Add some categories and test the many-to-many form output.
>>> new_art.categories.all()
[]
>>> new_art.categories.add(Category.objects.get(name='Entertainment'))
>>> new_art.categories.all()
[<Category: Entertainment>]
>>> class TestArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = TestArticleForm(auto_id=False, instance=new_art)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="...">Bob Woodward</option>
<option value="..." selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="..." selected="selected">Entertainment</option>
<option value="...">It's a test</option>
<option value="...">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
Initial values can be provided for model forms
>>> f = TestArticleForm(auto_id=False, initial={'headline': 'Your headline here', 'categories': [str(c1.id), str(c2.id)]})
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Bob Woodward</option>
<option value="...">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="..." selected="selected">Entertainment</option>
<option value="..." selected="selected">It's a test</option>
<option value="...">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
>>> f = TestArticleForm({'headline': u'New headline', 'slug': u'new-headline', 'pub_date': u'1988-01-04',
... 'writer': unicode(w_royko.pk), 'article': u'Hello.', 'categories': [unicode(c1.id), unicode(c2.id)]}, instance=new_art)
>>> new_art = f.save()
>>> new_art.id == art_id_1
True
>>> new_art = Article.objects.get(id=art_id_1)
>>> new_art.categories.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
Now, submit form data with no categories. This deletes the existing categories.
>>> f = TestArticleForm({'headline': u'New headline', 'slug': u'new-headline', 'pub_date': u'1988-01-04',
... 'writer': unicode(w_royko.pk), 'article': u'Hello.'}, instance=new_art)
>>> new_art = f.save()
>>> new_art.id == art_id_1
True
>>> new_art = Article.objects.get(id=art_id_1)
>>> new_art.categories.all()
[]
Create a new article, with categories, via the form.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm({'headline': u'The walrus was Paul', 'slug': u'walrus-was-paul', 'pub_date': u'1967-11-01',
... 'writer': unicode(w_royko.pk), 'article': u'Test.', 'categories': [unicode(c1.id), unicode(c2.id)]})
>>> new_art = f.save()
>>> art_id_2 = new_art.id
>>> art_id_2 not in (None, art_id_1)
True
>>> new_art = Article.objects.get(id=art_id_2)
>>> new_art.categories.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
Create a new article, with no categories, via the form.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm({'headline': u'The walrus was Paul', 'slug': u'walrus-was-paul', 'pub_date': u'1967-11-01',
... 'writer': unicode(w_royko.pk), 'article': u'Test.'})
>>> new_art = f.save()
>>> art_id_3 = new_art.id
>>> art_id_3 not in (None, art_id_1, art_id_2)
True
>>> new_art = Article.objects.get(id=art_id_3)
>>> new_art.categories.all()
[]
Create a new article, with categories, via the form, but use commit=False.
The m2m data won't be saved until save_m2m() is invoked on the form.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm({'headline': u'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': u'1967-11-01',
... 'writer': unicode(w_royko.pk), 'article': u'Test.', 'categories': [unicode(c1.id), unicode(c2.id)]})
>>> new_art = f.save(commit=False)
# Manually save the instance
>>> new_art.save()
>>> art_id_4 = new_art.id
>>> art_id_4 not in (None, art_id_1, art_id_2, art_id_3)
True
# The instance doesn't have m2m data yet
>>> new_art = Article.objects.get(id=art_id_4)
>>> new_art.categories.all()
[]
# Save the m2m data on the form
>>> f.save_m2m()
>>> new_art.categories.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
Here, we define a custom ModelForm. Because it happens to have the same fields as
the Category model, we can just call the form's save() to apply its changes to an
existing Category instance.
>>> class ShortCategory(ModelForm):
... name = CharField(max_length=5)
... slug = CharField(max_length=5)
... url = CharField(max_length=3)
>>> cat = Category.objects.get(name='Third test')
>>> cat
<Category: Third test>
>>> cat.id == c3.id
True
>>> form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
>>> form.save()
<Category: Third>
>>> Category.objects.get(id=c3.id)
<Category: Third>
Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
at runtime, based on the data in the database when the form is displayed, not
the data in the database when the form is instantiated.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm(auto_id=False)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Bob Woodward</option>
<option value="...">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="...">Entertainment</option>
<option value="...">It's a test</option>
<option value="...">Third</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
>>> c4 = Category.objects.create(name='Fourth', url='4th')
>>> c4
<Category: Fourth>
>>> Writer.objects.create(name='Carl Bernstein')
<Writer: Carl Bernstein>
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Bob Woodward</option>
<option value="...">Carl Bernstein</option>
<option value="...">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="...">Entertainment</option>
<option value="...">It's a test</option>
<option value="...">Third</option>
<option value="...">Fourth</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
# ModelChoiceField ############################################################
>>> from django.forms import ModelChoiceField, ModelMultipleChoiceField
>>> f = ModelChoiceField(Category.objects.all())
>>> list(f.choices)
[(u'', u'---------'), (..., u'Entertainment'), (..., u"It's a test"), (..., u'Third'), (..., u'Fourth')]
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(0)
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
>>> f.clean(c3.id)
<Category: Third>
>>> f.clean(c2.id)
<Category: It's a test>
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> c5 = Category.objects.create(name='Fifth', url='5th')
>>> c5
<Category: Fifth>
>>> f.clean(c5.id)
<Category: Fifth>
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> Category.objects.get(url='5th').delete()
>>> f.clean(c5.id)
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
>>> f = ModelChoiceField(Category.objects.filter(pk=c1.id), required=False)
>>> print f.clean('')
None
>>> f.clean('')
>>> f.clean(str(c1.id))
<Category: Entertainment>
>>> f.clean('100')
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
# queryset can be changed after the field is created.
>>> f.queryset = Category.objects.exclude(name='Fourth')
>>> list(f.choices)
[(u'', u'---------'), (..., u'Entertainment'), (..., u"It's a test"), (..., u'Third')]
>>> f.clean(c3.id)
<Category: Third>
>>> f.clean(c4.id)
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
# check that we can safely iterate choices repeatedly
>>> gen_one = list(f.choices)
>>> gen_two = f.choices
>>> gen_one[2]
(..., u"It's a test")
>>> list(gen_two)
[(u'', u'---------'), (..., u'Entertainment'), (..., u"It's a test"), (..., u'Third')]
# check that we can override the label_from_instance method to print custom labels (#4620)
>>> f.queryset = Category.objects.all()
>>> f.label_from_instance = lambda obj: "category " + str(obj)
>>> list(f.choices)
[(u'', u'---------'), (..., 'category Entertainment'), (..., "category It's a test"), (..., 'category Third'), (..., 'category Fourth')]
# ModelMultipleChoiceField ####################################################
>>> f = ModelMultipleChoiceField(Category.objects.all())
>>> list(f.choices)
[(..., u'Entertainment'), (..., u"It's a test"), (..., u'Third'), (..., u'Fourth')]
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean([])
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean([c1.id])
[<Category: Entertainment>]
>>> f.clean([c2.id])
[<Category: It's a test>]
>>> f.clean([str(c1.id)])
[<Category: Entertainment>]
>>> f.clean([str(c1.id), str(c2.id)])
[<Category: Entertainment>, <Category: It's a test>]
>>> f.clean([c1.id, str(c2.id)])
[<Category: Entertainment>, <Category: It's a test>]
>>> f.clean((c1.id, str(c2.id)))
[<Category: Entertainment>, <Category: It's a test>]
>>> f.clean(['100'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 100 is not one of the available choices.']
>>> f.clean('hello')
Traceback (most recent call last):
...
ValidationError: [u'Enter a list of values.']
>>> f.clean(['fail'])
Traceback (most recent call last):
...
ValidationError: [u'"fail" is not a valid value for a primary key.']
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> c6 = Category.objects.create(id=6, name='Sixth', url='6th')
>>> c6
<Category: Sixth>
>>> f.clean([c6.id])
[<Category: Sixth>]
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> Category.objects.get(url='6th').delete()
>>> f.clean([c6.id])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 6 is not one of the available choices.']
>>> f = ModelMultipleChoiceField(Category.objects.all(), required=False)
>>> f.clean([])
[]
>>> f.clean(())
[]
>>> f.clean(['10'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 10 is not one of the available choices.']
>>> f.clean([str(c3.id), '10'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 10 is not one of the available choices.']
>>> f.clean([str(c1.id), '10'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 10 is not one of the available choices.']
# queryset can be changed after the field is created.
>>> f.queryset = Category.objects.exclude(name='Fourth')
>>> list(f.choices)
[(..., u'Entertainment'), (..., u"It's a test"), (..., u'Third')]
>>> f.clean([c3.id])
[<Category: Third>]
>>> f.clean([c4.id])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. ... is not one of the available choices.']
>>> f.clean([str(c3.id), str(c4.id)])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. ... is not one of the available choices.']
>>> f.queryset = Category.objects.all()
>>> f.label_from_instance = lambda obj: "multicategory " + str(obj)
>>> list(f.choices)
[(..., 'multicategory Entertainment'), (..., "multicategory It's a test"), (..., 'multicategory Third'), (..., 'multicategory Fourth')]
# OneToOneField ###############################################################
>>> class ImprovedArticleForm(ModelForm):
... class Meta:
... model = ImprovedArticle
>>> ImprovedArticleForm.base_fields.keys()
['article']
>>> class ImprovedArticleWithParentLinkForm(ModelForm):
... class Meta:
... model = ImprovedArticleWithParentLink
>>> ImprovedArticleWithParentLinkForm.base_fields.keys()
[]
>>> bw = BetterWriter(name=u'Joe Better', score=10)
>>> bw.save()
>>> sorted(model_to_dict(bw).keys())
['id', 'name', 'score', 'writer_ptr']
>>> class BetterWriterForm(ModelForm):
... class Meta:
... model = BetterWriter
>>> form = BetterWriterForm({'name': 'Some Name', 'score': 12})
>>> form.is_valid()
True
>>> bw2 = form.save()
>>> bw2.delete()
>>> class WriterProfileForm(ModelForm):
... class Meta:
... model = WriterProfile
>>> form = WriterProfileForm()
>>> print form.as_p()
<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="...">Bob Woodward</option>
<option value="...">Carl Bernstein</option>
<option value="...">Joe Better</option>
<option value="...">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></p>
>>> data = {
... 'writer': unicode(w_woodward.pk),
... 'age': u'65',
... }
>>> form = WriterProfileForm(data)
>>> instance = form.save()
>>> instance
<WriterProfile: Bob Woodward is 65>
>>> form = WriterProfileForm(instance=instance)
>>> print form.as_p()
<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="..." selected="selected">Bob Woodward</option>
<option value="...">Carl Bernstein</option>
<option value="...">Joe Better</option>
<option value="...">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" value="65" id="id_age" /></p>
# PhoneNumberField ############################################################
>>> class PhoneNumberForm(ModelForm):
... class Meta:
... model = PhoneNumber
>>> f = PhoneNumberForm({'phone': '(312) 555-1212', 'description': 'Assistance'})
>>> f.is_valid()
True
>>> f.cleaned_data['phone']
u'312-555-1212'
>>> f.cleaned_data['description']
u'Assistance'
# FileField ###################################################################
# File forms.
>>> class TextFileForm(ModelForm):
... class Meta:
... model = TextFile
# Test conditions when files is either not given or empty.
>>> f = TextFileForm(data={'description': u'Assistance'})
>>> f.is_valid()
False
>>> f = TextFileForm(data={'description': u'Assistance'}, files={})
>>> f.is_valid()
False
# Upload a file and ensure it all works as expected.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
>>> f.is_valid()
True
>>> type(f.cleaned_data['file'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test1.txt>
>>> instance.file.delete()
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
>>> f.is_valid()
True
>>> type(f.cleaned_data['file'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test1.txt>
# Check if the max_length attribute has been inherited from the model.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test-maxlength.txt', 'hello world')})
>>> f.is_valid()
False
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
>>> f = TextFileForm(data={'description': u'Assistance'}, instance=instance)
>>> f.is_valid()
True
>>> f.cleaned_data['file']
<FieldFile: tests/test1.txt>
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test1.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
# Override the file by uploading a new one.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', 'hello world')}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test2.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', 'hello world')})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test2.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> instance.delete()
# Test the non-required FileField
>>> f = TextFileForm(data={'description': u'Assistance'})
>>> f.fields['file'].required = False
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: None>
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', 'hello world')}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test3.txt>
# Instance can be edited without re-uploading the file, and the existing file should be preserved.
>>> f = TextFileForm(data={'description': u'New Description'}, instance=instance)
>>> f.fields['file'].required = False
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.description
u'New Description'
>>> instance.file
<FieldFile: tests/test3.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> instance.delete()
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', 'hello world')})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test3.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> instance.delete()
# BigIntegerField ################################################################
>>> class BigIntForm(forms.ModelForm):
... class Meta:
... model = BigInt
...
>>> bif = BigIntForm({'biggie': '-9223372036854775808'})
>>> bif.is_valid()
True
>>> bif = BigIntForm({'biggie': '-9223372036854775809'})
>>> bif.is_valid()
False
>>> bif.errors
{'biggie': [u'Ensure this value is greater than or equal to -9223372036854775808.']}
>>> bif = BigIntForm({'biggie': '9223372036854775807'})
>>> bif.is_valid()
True
>>> bif = BigIntForm({'biggie': '9223372036854775808'})
>>> bif.is_valid()
False
>>> bif.errors
{'biggie': [u'Ensure this value is less than or equal to 9223372036854775807.']}
"""}
if test_images:
__test__['API_TESTS'] += """
# ImageField ###################################################################
# ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
>>> class ImageFileForm(ModelForm):
... class Meta:
... model = ImageFile
>>> image_data = open(os.path.join(os.path.dirname(__file__), "test.png"), 'rb').read()
>>> image_data2 = open(os.path.join(os.path.dirname(__file__), "test2.png"), 'rb').read()
>>> f = ImageFileForm(data={'description': u'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)})
>>> f.is_valid()
True
>>> type(f.cleaned_data['image'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test.png>
>>> instance.width
16
>>> instance.height
16
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
>>> f = ImageFileForm(data={'description': u'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)})
>>> f.is_valid()
True
>>> type(f.cleaned_data['image'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test.png>
>>> instance.width
16
>>> instance.height
16
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
>>> f = ImageFileForm(data={'description': u'Look, it changed'}, instance=instance)
>>> f.is_valid()
True
>>> f.cleaned_data['image']
<...FieldFile: tests/test.png>
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test.png>
>>> instance.height
16
>>> instance.width
16
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
# Override the file by uploading a new one.
>>> f = ImageFileForm(data={'description': u'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test2.png>
>>> instance.height
32
>>> instance.width
48
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
>>> instance.delete()
>>> f = ImageFileForm(data={'description': u'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test2.png>
>>> instance.height
32
>>> instance.width
48
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
>>> instance.delete()
# Test the non-required ImageField
>>> class OptionalImageFileForm(ModelForm):
... class Meta:
... model = OptionalImageFile
>>> f = OptionalImageFileForm(data={'description': u'Test'})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: None>
>>> instance.width
>>> instance.height
>>> f = OptionalImageFileForm(data={'description': u'And a final one'}, files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test3.png>
>>> instance.width
16
>>> instance.height
16
# Editing the instance without re-uploading the image should not affect the image or its width/height properties
>>> f = OptionalImageFileForm(data={'description': u'New Description'}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.description
u'New Description'
>>> instance.image
<...FieldFile: tests/test3.png>
>>> instance.width
16
>>> instance.height
16
# Delete the current file since this is not done by Django.
>>> instance.image.delete()
>>> instance.delete()
>>> f = OptionalImageFileForm(data={'description': u'And a final one'}, files={'image': SimpleUploadedFile('test4.png', image_data2)})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test4.png>
>>> instance.width
48
>>> instance.height
32
>>> instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
>>> f = ImageFileForm(data={'description': u'And a final one', 'path': 'foo'}, files={'image': SimpleUploadedFile('test4.png', image_data)})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: foo/test4.png>
>>> instance.delete()
"""
__test__['API_TESTS'] += """
# Media on a ModelForm ########################################################
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
>>> class ModelFormWithMedia(ModelForm):
... class Media:
... js = ('/some/form/javascript',)
... css = {
... 'all': ('/some/form/css',)
... }
... class Meta:
... model = PhoneNumber
>>> f = ModelFormWithMedia()
>>> print f.media
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>
>>> class CommaSeparatedIntegerForm(ModelForm):
... class Meta:
... model = CommaSeparatedInteger
>>> f = CommaSeparatedIntegerForm({'field': '1,2,3'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u'1,2,3'}
>>> f = CommaSeparatedIntegerForm({'field': '1a,2'})
>>> f.errors
{'field': [u'Enter only digits separated by commas.']}
>>> f = CommaSeparatedIntegerForm({'field': ',,,,'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u',,,,'}
>>> f = CommaSeparatedIntegerForm({'field': '1.2'})
>>> f.errors
{'field': [u'Enter only digits separated by commas.']}
>>> f = CommaSeparatedIntegerForm({'field': '1,a,2'})
>>> f.errors
{'field': [u'Enter only digits separated by commas.']}
>>> f = CommaSeparatedIntegerForm({'field': '1,,2'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u'1,,2'}
>>> f = CommaSeparatedIntegerForm({'field': '1'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u'1'}
This Price instance generated by this form is not valid because the quantity
field is required, but the form is valid because the field is excluded from
the form. This is for backwards compatibility.
>>> class PriceForm(ModelForm):
... class Meta:
... model = Price
... exclude = ('quantity',)
>>> form = PriceForm({'price': '6.00'})
>>> form.is_valid()
True
>>> price = form.save(commit=False)
>>> price.full_clean()
Traceback (most recent call last):
...
ValidationError: {'quantity': [u'This field cannot be null.']}
The form should not validate fields that it doesn't contain even if they are
specified using 'fields', not 'exclude'.
>>> class PriceForm(ModelForm):
... class Meta:
... model = Price
... fields = ('price',)
>>> form = PriceForm({'price': '6.00'})
>>> form.is_valid()
True
The form should still have an instance of a model that is not complete and
not saved into a DB yet.
>>> form.instance.price
Decimal('6.00')
>>> form.instance.quantity is None
True
>>> form.instance.pk is None
True
# Choices on CharField and IntegerField
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm()
>>> f.fields['status'].clean('42')
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 42 is not one of the available choices.']
>>> class ArticleStatusForm(ModelForm):
... class Meta:
... model = ArticleStatus
>>> f = ArticleStatusForm()
>>> f.fields['status'].clean('z')
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. z is not one of the available choices.']
# Foreign keys which use to_field #############################################
>>> apple = Inventory.objects.create(barcode=86, name='Apple')
>>> pear = Inventory.objects.create(barcode=22, name='Pear')
>>> core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
>>> field = ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
>>> for choice in field.choices:
... print choice
(u'', u'---------')
(86, u'Apple')
(87, u'Core')
(22, u'Pear')
>>> class InventoryForm(ModelForm):
... class Meta:
... model = Inventory
>>> form = InventoryForm(instance=core)
>>> print form['parent']
<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>
>>> data = model_to_dict(core)
>>> data['parent'] = '22'
>>> form = InventoryForm(data=data, instance=core)
>>> core = form.save()
>>> core.parent
<Inventory: Pear>
>>> class CategoryForm(ModelForm):
... description = forms.CharField()
... class Meta:
... model = Category
... fields = ['description', 'url']
>>> CategoryForm.base_fields.keys()
['description', 'url']
>>> print CategoryForm()
<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
# to_field_name should also work on ModelMultipleChoiceField ##################
>>> field = ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
>>> for choice in field.choices:
... print choice
(86, u'Apple')
(87, u'Core')
(22, u'Pear')
>>> field.clean([86])
[<Inventory: Apple>]
>>> class SelectInventoryForm(forms.Form):
... items = ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
>>> form = SelectInventoryForm({'items': [87, 22]})
>>> form.is_valid()
True
>>> form.cleaned_data
{'items': [<Inventory: Core>, <Inventory: Pear>]}
# Model field that returns None to exclude itself with explicit fields ########
>>> class CustomFieldForExclusionForm(ModelForm):
... class Meta:
... model = CustomFieldForExclusionModel
... fields = ['name', 'markup']
>>> CustomFieldForExclusionForm.base_fields.keys()
['name']
>>> print CustomFieldForExclusionForm()
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>
# Clean up
>>> import shutil
>>> shutil.rmtree(temp_storage_dir)
"""
| bsd-3-clause | -1,267,683,232,460,326,100 | 32.584005 | 173 | 0.64972 | false |
rhcarvalho/kombu | kombu/async/aws/sqs/message.py | 9 | 1028 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .ext import (
RawMessage, Message, MHMessage, EncodedMHMessage, JSONMessage,
)
__all__ = ['BaseAsyncMessage', 'AsyncRawMessage', 'AsyncMessage',
'AsyncMHMessage', 'AsyncEncodedMHMessage', 'AsyncJSONMessage']
class BaseAsyncMessage(object):
def delete(self, callback=None):
if self.queue:
return self.queue.delete_message(self, callback)
def change_visibility(self, visibility_timeout, callback=None):
if self.queue:
return self.queue.connection.change_message_visibility(
self.queue, self.receipt_handle, visibility_timeout, callback,
)
class AsyncRawMessage(BaseAsyncMessage, RawMessage):
pass
class AsyncMessage(BaseAsyncMessage, Message):
pass
class AsyncMHMessage(BaseAsyncMessage, MHMessage):
pass
class AsyncEncodedMHMessage(BaseAsyncMessage, EncodedMHMessage):
pass
class AsyncJSONMessage(BaseAsyncMessage, JSONMessage):
pass
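# Rough usage sketch (not part of the original module). It assumes an asynchronous
# SQS queue object from kombu's async AWS transport whose read path yields
# AsyncMessage instances; the `queue` variable and the callback signature below are
# hypothetical and shown only to illustrate the callback-style API of these classes.
#
# def on_deleted(response):
#     print('message deleted:', response)
#
# message = AsyncMessage(queue, body='hello')
# message.delete(callback=on_deleted)                      # deletes via queue.delete_message
# message.change_visibility(30, callback=lambda r: None)   # extends the visibility timeout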
| bsd-3-clause | -2,067,044,155,838,424,600 | 23.47619 | 78 | 0.70428 | false |
alfa-addon/addon | plugin.video.alfa/lib/cloudscraper/captcha/2captcha.py | 3 | 10487 | from __future__ import absolute_import
import requests
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from ..exceptions import (
CaptchaServiceUnavailable,
CaptchaAPIError,
CaptchaTimeout,
CaptchaParameter,
CaptchaBadJobID,
CaptchaReportError
)
try:
import polling2
except ImportError:
raise ImportError("Please install the python module 'polling2' via pip")
from . import Captcha
class captchaSolver(Captcha):
def __init__(self):
super(captchaSolver, self).__init__('2captcha')
self.host = 'https://2captcha.com'
self.session = requests.Session()
# ------------------------------------------------------------------------------- #
@staticmethod
def checkErrorStatus(response, request_type):
if response.status_code in [500, 502]:
raise CaptchaServiceUnavailable('2Captcha: Server Side Error {}'.format(response.status_code))
errors = {
'in.php': {
"ERROR_WRONG_USER_KEY": "You've provided api_key parameter value is in incorrect format, it should contain 32 symbols.",
"ERROR_KEY_DOES_NOT_EXIST": "The api_key you've provided does not exists.",
"ERROR_ZERO_BALANCE": "You don't have sufficient funds on your account.",
"ERROR_PAGEURL": "pageurl parameter is missing in your request.",
"ERROR_NO_SLOT_AVAILABLE":
"No Slots Available.\nYou can receive this error in two cases:\n"
"1. If you solve ReCaptcha: the queue of your captchas that are not distributed to workers is too long. "
"Queue limit changes dynamically and depends on total amount of captchas awaiting solution and usually it's between 50 and 100 captchas.\n"
"2. If you solve Normal Captcha: your maximum rate for normal captchas is lower than current rate on the server."
"You can change your maximum rate in your account's settings.",
"ERROR_IP_NOT_ALLOWED": "The request is sent from the IP that is not on the list of your allowed IPs.",
"IP_BANNED": "Your IP address is banned due to many frequent attempts to access the server using wrong authorization keys.",
"ERROR_BAD_TOKEN_OR_PAGEURL":
"You can get this error code when sending ReCaptcha V2. "
"That happens if your request contains invalid pair of googlekey and pageurl. "
"The common reason for that is that ReCaptcha is loaded inside an iframe hosted on another domain/subdomain.",
"ERROR_GOOGLEKEY":
"You can get this error code when sending ReCaptcha V2. "
"That means that sitekey value provided in your request is incorrect: it's blank or malformed.",
"MAX_USER_TURN": "You made more than 60 requests within 3 seconds.Your account is banned for 10 seconds. Ban will be lifted automatically."
},
'res.php': {
"ERROR_CAPTCHA_UNSOLVABLE":
"We are unable to solve your captcha - three of our workers were unable solve it "
"or we didn't get an answer within 90 seconds (300 seconds for ReCaptcha V2). "
"We will not charge you for that request.",
"ERROR_WRONG_USER_KEY": "You've provided api_key parameter value in incorrect format, it should contain 32 symbols.",
"ERROR_KEY_DOES_NOT_EXIST": "The api_key you've provided does not exists.",
"ERROR_WRONG_ID_FORMAT": "You've provided captcha ID in wrong format. The ID can contain numbers only.",
"ERROR_WRONG_CAPTCHA_ID": "You've provided incorrect captcha ID.",
"ERROR_BAD_DUPLICATES":
"Error is returned when 100% accuracy feature is enabled. "
"The error means that max numbers of tries is reached but min number of matches not found.",
"REPORT_NOT_RECORDED": "Error is returned to your complain request if you already complained lots of correctly solved captchas.",
"ERROR_IP_ADDRES":
"You can receive this error code when registering a pingback (callback) IP or domain."
"That happes if your request is coming from an IP address that doesn't match the IP address of your pingback IP or domain.",
"ERROR_TOKEN_EXPIRED": "You can receive this error code when sending GeeTest. That error means that challenge value you provided is expired.",
"ERROR_EMPTY_ACTION": "Action parameter is missing or no value is provided for action parameter."
}
}
rPayload = response.json()
if rPayload.get('status') == 0 and rPayload.get('request') in errors.get(request_type):
raise CaptchaAPIError(
'{} {}'.format(
rPayload['request'],
errors.get(request_type).get(rPayload['request'])
)
)
# ------------------------------------------------------------------------------- #
def reportJob(self, jobID):
if not jobID:
raise CaptchaBadJobID(
"2Captcha: Error bad job id to request Captcha."
)
def _checkRequest(response):
self.checkErrorStatus(response, 'res.php')
if response.ok and response.json().get('status') == 1:
return response
return None
response = polling2.poll(
lambda: self.session.get(
'{}/res.php'.format(self.host),
params={
'key': self.api_key,
'action': 'reportbad',
'id': jobID,
'json': '1'
},
timeout=30
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
return True
else:
raise CaptchaReportError(
"2Captcha: Error - Failed to report bad Captcha solve."
)
# ------------------------------------------------------------------------------- #
def requestJob(self, jobID):
if not jobID:
raise CaptchaBadJobID("2Captcha: Error bad job id to request Captcha.")
def _checkRequest(response):
self.checkErrorStatus(response, 'res.php')
if response.ok and response.json().get('status') == 1:
return response
return None
response = polling2.poll(
lambda: self.session.get(
'{}/res.php'.format(self.host),
params={
'key': self.api_key,
'action': 'get',
'id': jobID,
'json': '1'
},
timeout=30
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
return response.json().get('request')
else:
raise CaptchaTimeout(
"2Captcha: Error failed to solve Captcha."
)
# ------------------------------------------------------------------------------- #
def requestSolve(self, captchaType, url, siteKey):
def _checkRequest(response):
self.checkErrorStatus(response, 'in.php')
if response.ok and response.json().get("status") == 1 and response.json().get('request'):
return response
return None
data = {
'key': self.api_key,
'pageurl': url,
'json': 1,
'soft_id': 2905
}
data.update(
{
                'method': 'userrecaptcha',
'googlekey': siteKey
} if captchaType == 'reCaptcha' else {
'method': 'hcaptcha',
'sitekey': siteKey
}
)
if self.proxy:
data.update(
{
'proxy': self.proxy,
'proxytype': self.proxyType
}
)
response = polling2.poll(
lambda: self.session.post(
'{}/in.php'.format(self.host),
data=data,
allow_redirects=False,
timeout=30
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
return response.json().get('request')
else:
raise CaptchaBadJobID(
'2Captcha: Error no job id was returned.'
)
# ------------------------------------------------------------------------------- #
def getCaptchaAnswer(self, captchaType, url, siteKey, captchaParams):
jobID = None
if not captchaParams.get('api_key'):
raise CaptchaParameter(
"2Captcha: Missing api_key parameter."
)
self.api_key = captchaParams.get('api_key')
if captchaParams.get('proxy') and not captchaParams.get('no_proxy'):
hostParsed = urlparse(captchaParams.get('proxy', {}).get('https'))
if not hostParsed.scheme:
raise CaptchaParameter('Cannot parse proxy correctly, bad scheme')
if not hostParsed.netloc:
raise CaptchaParameter('Cannot parse proxy correctly, bad netloc')
self.proxyType = hostParsed.scheme
self.proxy = hostParsed.netloc
else:
self.proxy = None
try:
jobID = self.requestSolve(captchaType, url, siteKey)
return self.requestJob(jobID)
except polling2.TimeoutException:
try:
if jobID:
self.reportJob(jobID)
except polling2.TimeoutException:
raise CaptchaTimeout(
"2Captcha: Captcha solve took to long and also failed reporting the job the job id {}.".format(jobID)
)
raise CaptchaTimeout(
"2Captcha: Captcha solve took to long to execute job id {}, aborting.".format(jobID)
)
# ------------------------------------------------------------------------------- #
captchaSolver()
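# ------------------------------------------------------------------------------- #
# Illustrative sketch (added for clarity, not part of the upstream module): how this
# solver's entry point might be exercised directly. The page URL, site key and API
# key below are placeholders; in normal use cloudscraper invokes the solver itself.
def _example_direct_solve():
    solver = captchaSolver()
    return solver.getCaptchaAnswer(
        captchaType='reCaptcha',
        url='https://example.com/login',             # hypothetical page
        siteKey='EXAMPLE_SITE_KEY',                  # hypothetical sitekey
        captchaParams={'api_key': 'YOUR_2CAPTCHA_API_KEY'}
    )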
| gpl-3.0 | 1,021,921,056,511,593,500 | 38.874525 | 159 | 0.525222 | false |
BrechtBa/parsenlp | examples/hs101.py | 3 | 1349 | #!/usr/bin/env/ python
################################################################################
# Copyright 2016 Brecht Baeten
# This file is part of jsonopt.
#
# jsonopt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jsonopt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jsonopt. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import numpy as np
import jsonopt
# load the problem from a file in json format
with open('json/hs101.json', 'r') as jsonfile:
jsonstring=jsonfile.read()
# parse the problem
problem = jsonopt.Problem(jsonstring=jsonstring)
# solve and get the solution
problem.solve()
values = problem.get_values()
print( 'solution: {}'.format(values['x']) )
print( 'objective: {}'.format(values['objective']) )
| gpl-3.0 | 589,607,433,428,681,000 | 34.459459 | 80 | 0.613047 | false |
ojengwa/grr | config/data_server.py | 13 | 1402 | #!/usr/bin/env python
"""Configuration parameters for the data servers."""
from grr.lib import config_lib
# The Data Store server.
config_lib.DEFINE_integer("Dataserver.stats_frequency", 60,
("Time interval in seconds for data server "
"statistics updates"))
config_lib.DEFINE_list("Dataserver.server_list",
["http://127.0.0.1:7000", "http://127.0.0.1:7001"],
"List of allowed data servers (first is the master).")
config_lib.DEFINE_integer("Dataserver.max_connections", 5,
("Maximum number of connections to the data server "
"per process."))
config_lib.DEFINE_integer("Dataserver.port", 7000,
"Port for a specific data server.")
# Login information for clients of the data servers.
config_lib.DEFINE_list("Dataserver.client_credentials", ["user:pass:rw"],
"List of data server client credentials, given as "
"<username>:<password>:<mode> where mode is r or rw.")
# Login information used by data servers when registering with the master.
config_lib.DEFINE_string("Dataserver.server_username", "server",
"Username for servers.")
config_lib.DEFINE_string("Dataserver.server_password", "server",
"Password for servers.")
| apache-2.0 | -6,089,138,912,266,132,000 | 42.8125 | 78 | 0.599857 | false |
astocko/agpy | agpy/kdist.py | 6 | 4884 | from numpy import sqrt, abs, pi, cos, sin, max, ones, array
from astropy import coordinates
def kdist(l, b, vin, near=True,r0=8.4e3,v0=2.54e2,dynamical=False,
kinematic=True,regular=False,rrgal=False,verbose=False,
inverse=False,silent=False, returnvtan=False):
"""
NAME:
KINDIST
PURPOSE:
To return the distance to an object given l,b,v
CALLING SEQUENCE:
dist = KDIST (L, B, V)
INPUTS:
L, B -- Galactic Longitude and Latitude (decimal degrees)
V - Velocity w.r.t. LSR in km/s
KEYWORD PARAMETERS:
/NEAR, /FAR -- Report the near/far kinematic distances for Q1 and
Q4 data.
RO, VO -- Force values for galactocentric distance for sun and
velocity of the LSR around the GC. Default to 8.4 kpc
and 254 km/s (Reid et al., 2009)
RGAL -- Named keyword containing galactocentric radius of sources.
rrgal - return galactocentric distance in addition to distance from us
/DYNAMICAL -- Use the dynamical definition of the LSR
/KINEMATIC -- Use the kinematic definition of the LSR (default)
/REGULAR -- Do not apply the rotation correction for High mass
star forming regions.
INVERSE -- If set, pass DISTANCE instead of velocity, and output is
velocity
    returnvtan - if set, return the tangent velocity and ignore the input
velocity
OUTPUTS:
DIST -- the kinematic distance in units of R0 (defaults to pc).
MODIFICATION HISTORY:
Fri Feb 27 00:47:18 2009, Erik <[email protected]>
Adapted from kindist.pro
Translated from IDL to Python by Adam Ginsburg ([email protected])
"""
dtor = pi/180.
if regular: vs = 0.0
else: vs=15.0
if kinematic or not(dynamical):
solarmotion_ra = ((18+03/6e1+50.29/3.6e3)*15)
solarmotion_dec = (30+0/6e1+16.8/3.6e3)
solarmotion_mag = 20.0
else:
solarmotion_ra = ((17+49/6e1+58.667/3.6e3)*15)
solarmotion_dec = (28+7/6e1+3.96/3.6e3)
solarmotion_mag = 16.55294
cg = coordinates.Galactic(l,b,unit=('deg','deg'))
solarmotion = coordinates.ICRS(solarmotion_ra, solarmotion_dec, unit=('deg','deg'))
# ra,dec = cg.j2000()
# gcirc, 2, solarmotion_ra, solarmotion_dec, ra, dec, theta
theta = cg.separation(solarmotion).to('arcsec').value
vhelio = vin-solarmotion_mag*cos(theta/206265.)
# UVW from Dehnen and Binney
bigu = 10.0
bigv = 5.23
bigw = 7.17
v = vhelio+(bigu*cos(l*dtor)+bigv*sin(l*dtor))*cos(b*dtor)+bigw*sin(b*dtor)
# Compute tangent distance and velocity
rtan = r0*(cos(l*dtor))/(cos(b*dtor))
vTEMP = (1/sin(l*dtor) - v0/(v0-vs)) * ((v0-vs)*sin(l*dtor)*cos(b*dtor))
vhelioTEMP = vTEMP - ((bigu*cos(l*dtor)+bigv*sin(l*dtor))*cos(b*dtor)+bigw*sin(b*dtor))
vtan = vhelioTEMP+solarmotion_mag*cos(theta/206265.)
if returnvtan:
return vtan
# This is r/r0
null = (v0/(v0-vs)+v/((v0-vs)*sin(l*dtor)*cos(b*dtor)))**(-1)
if inverse:
radical = cos(l*dtor) - cos(b*dtor) * vin / r0
null = sqrt(1 - cos(l*dtor)**2 + radical**2)
v = (1/null - v0/(v0-vs)) * ((v0-vs)*sin(l*dtor)*cos(b*dtor))
vhelio = v - ((bigu*cos(l*dtor)+bigv*sin(l*dtor))*cos(b*dtor)+bigw*sin(b*dtor))
vlsr = vhelio+solarmotion_mag*cos(theta/206265.)
return vlsr
else:
if vin > vtan:
if not silent:
print "Velocity is greater than tangent velocity v=%f. Returning tangent distance." % vtan
if rrgal: return rtan,null*r0
return rtan
# The > 0 traps things near the tangent point and sets them to the
# tangent distance. So quietly. Perhaps this should pitch a flag?
radical = max(sqrt(((cos(l*dtor))**2-(1-null**2)) ),0)
fardist = r0*(cos(l*dtor)+radical)/(cos(b*dtor))
neardist = r0*(cos(l*dtor)-radical)/(cos(b*dtor))
rgal = null*r0
ind = (abs(l-180) < 90)
if ind.sum() > 1: neardist[ind] = fardist[ind]
elif ind==True: neardist = fardist
if not(near): dist = fardist
else: dist = neardist
if verbose:
print "radical: %f null: %f vin: %f v: %f vhelio: %f rgal: %f neardist: %f fardist: %f" % (radical,null,vin,v,vhelio,rgal,neardist,fardist)
if rrgal: return abs(dist),abs(rgal)
return abs(dist)
def vector_kdist(x,y,z,**kwargs):
if type(z)==type(1) or type(z)==type(1.0):
z = z*ones(len(x))
v = []
for i,j,k in array([x,y,z]).T:
v.append( kdist(i,j,k,**kwargs) )
return array(v)
def threekpcarm(longitude,radius=3.0,center_distance=8.5):
return sqrt(radius**2+center_distance**2-2*radius*center_distance*cos( (90-3*longitude) / 180. * pi ))
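# Illustrative usage sketch (added for clarity; coordinate and velocity values are
# hypothetical). Distances come back in the units of r0, i.e. pc with the default
# r0=8.4e3.
if __name__ == '__main__':
    # near kinematic distance for l=30 deg, b=0 deg, v_LSR=60 km/s
    print kdist(30.0, 0.0, 60.0, near=True)
    # far distance plus the galactocentric radius
    print kdist(30.0, 0.0, 60.0, near=False, rrgal=True)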
| mit | -3,965,084,348,041,777,000 | 36.282443 | 153 | 0.598894 | false |
tillrohrmann/flink | flink-python/pyflink/datastream/tests/test_stream_execution_environment_completeness.py | 3 | 3043 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
class StreamExecutionEnvironmentCompletenessTests(PythonAPICompletenessTestCase,
PyFlinkTestCase):
@classmethod
def python_class(cls):
return StreamExecutionEnvironment
@classmethod
def java_class(cls):
return "org.apache.flink.streaming.api.environment.StreamExecutionEnvironment"
@classmethod
def excluded_methods(cls):
# Exclude these methods for the time being, because current
# ExecutionEnvironment/StreamExecutionEnvironment do not apply to the
# DataSet/DataStream API, but to the Table API configuration.
# Currently only the methods for configuration is added.
# 'isForceCheckpointing', 'getNumberOfExecutionRetries', 'setNumberOfExecutionRetries'
# is deprecated, exclude them.
return {'getLastJobExecutionResult', 'getId', 'getIdString',
'registerCachedFile', 'createCollectionsEnvironment', 'createLocalEnvironment',
'createRemoteEnvironment', 'addOperator', 'fromElements',
'resetContextEnvironment', 'getCachedFiles', 'generateSequence',
'getNumberOfExecutionRetries', 'getStreamGraph', 'fromParallelCollection',
'readFileStream', 'isForceCheckpointing', 'readFile', 'clean',
'createInput', 'createLocalEnvironmentWithWebUI', 'fromCollection',
'socketTextStream', 'initializeContextEnvironment', 'readTextFile', 'addSource',
'setNumberOfExecutionRetries', 'configure', 'executeAsync', 'registerJobListener',
'clearJobListeners', 'getJobListeners', "fromSource"}
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 | 637,764,542,396,819,300 | 48.080645 | 98 | 0.672034 | false |
JohnCrickett/Sentiment | modelling/sentiment_model.py | 1 | 3595 | from sklearn import model_selection
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.pipeline import Pipeline
import pandas as pd
from modelling.utils import remove_punctuation, remove_stop_words
def determine_sentiment(delta):
"""Returns 1 for positive sentiment, 0 otherwise"""
if delta > 0:
return 1
else:
return 0
def load_data(data_filename):
# disable copy warnings from Pandas
pd.options.mode.chained_assignment = None
data = pd.read_csv(data_filename, encoding='latin1')
# drop any invalid rows, if the data is incomplete
data.dropna(inplace=True)
# sentiment feature generation
data['text'] = data['article_content'].apply(remove_punctuation)
data['text'] = data['text'].apply(remove_stop_words)
# generate price delta and labels
# data['price_delta'] = data['close_31'] - data['open_31']
# data['price_delta_percent'] = \
# ((data['close_31'] - data['open_31']) / data['open_31']) * 100
data['price_delta'] = data['close_31'] - data['close_30']
data['price_delta_percent'] = \
((data['close_31'] - data['close_30']) / data['close_30']) * 100
data['sentiment'] = \
data['price_delta_percent'].apply(determine_sentiment)
return data
def train_model(data):
# create the train / test split
train_X, test_X = \
model_selection.train_test_split(data['article_content'],
train_size=0.7,
random_state=0)
train_Y, test_Y = model_selection.train_test_split(data['sentiment'],
train_size=0.7,
random_state=0)
# TODO wrap all this in a grid search then retrain on the best
# meta parameters
pipeline = Pipeline([('count_vectorizer', CountVectorizer(ngram_range=(1,
1))),
('tfidf_transformer', TfidfTransformer()),
('classifier', LogisticRegressionCV(n_jobs=-1,
solver='sag',
Cs=[10, 1, 0.1,
0.01, 0.001],
max_iter=10000))])
pipeline.fit(train_X, train_Y)
test_predictions = pipeline.predict(test_X)
accuracy = accuracy_score(test_Y, test_predictions) * 100
print("Fully Trained Accuracy: {accuracy:.3f}".format(accuracy=accuracy))
pd.options.mode.chained_assignment = 'warn'
return pipeline
def save_model(model, model_filename):
joblib.dump(model, model_filename)
def load_model(model_filename):
model = joblib.load(model_filename)
return model
def predict(model, text):
df = pd.DataFrame(data=[{'article': text}])
return model.predict_proba(df['article'])[0]
def check_model(model, data):
test_predictions = model.predict(data['article_content'])
accuracy = accuracy_score(data['sentiment'], test_predictions) * 100
print("Restored Model Accuracy: {accuracy:.3f}".format(accuracy=accuracy))
data['predictions'] = test_predictions
data.to_csv('./data/td_with_predict.csv')
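# Illustrative sketch (added for clarity): chaining the helpers above into a full
# run. The CSV and model filenames are placeholders.
if __name__ == '__main__':
    articles = load_data('./data/articles.csv')
    model = train_model(articles)
    save_model(model, './models/sentiment_model.pkl')
    restored = load_model('./models/sentiment_model.pkl')
    check_model(restored, articles)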
| mit | -1,249,370,185,584,871,200 | 33.567308 | 80 | 0.579972 | false |
willusher/ansible-modules-core | files/fetch.py | 40 | 3516 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: fetch
short_description: Fetches a file from remote nodes
description:
- This module works like M(copy), but in reverse. It is used for fetching
files from remote machines and storing them locally in a file tree,
organized by hostname. Note that this module is written to transfer
log files that might not be present, so a missing remote file won't
be an error unless fail_on_missing is set to 'yes'.
version_added: "0.2"
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a
directory. Recursive fetching may be supported in a later release.
required: true
default: null
aliases: []
dest:
description:
- A directory to save the file into. For example, if the I(dest)
directory is C(/backup) a I(src) file named C(/etc/profile) on host
C(host.example.com), would be saved into
C(/backup/host.example.com/etc/profile)
required: true
default: null
fail_on_missing:
version_added: "1.1"
description:
- When set to 'yes', the task will fail if the source file is missing.
required: false
choices: [ "yes", "no" ]
default: "no"
validate_checksum:
version_added: "1.4"
description:
- Verify that the source and destination checksums match after the files are fetched.
required: false
choices: [ "yes", "no" ]
default: "yes"
aliases: [ "validate_md5" ]
flat:
version_added: "1.2"
description:
- Allows you to override the default behavior of appending
hostname/path/to/file to the destination. If dest ends with '/', it
will use the basename of the source file, similar to the copy module.
Obviously this is only handy if the filenames are unique.
requirements: []
author:
- "Ansible Core Team"
- "Michael DeHaan"
notes:
- When running fetch with C(become), the M(slurp) module will also be
used to fetch the contents of the file for determining the remote
checksum. This effectively doubles the transfer size, and
depending on the file size can consume all available memory on the
remote or local hosts causing a C(MemoryError). Due to this it is
advisable to run this module without C(become) whenever possible.
'''
EXAMPLES = '''
# Store file into /tmp/fetched/host.example.com/tmp/somefile
- fetch: src=/tmp/somefile dest=/tmp/fetched
# Specifying a path directly
- fetch: src=/tmp/somefile dest=/tmp/prefix-{{ inventory_hostname }} flat=yes
# Specifying a destination path
- fetch: src=/tmp/uniquefile dest=/tmp/special/ flat=yes
# Storing in a path relative to the playbook
- fetch: src=/tmp/uniquefile dest=special/prefix-{{ inventory_hostname }} flat=yes
'''
| gpl-3.0 | 5,792,329,404,254,870,000 | 37.217391 | 91 | 0.701934 | false |
yongtang/tensorflow | tensorflow/python/keras/utils/generic_utils.py | 6 | 42150 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
import binascii
import codecs
import importlib
import marshal
import os
import re
import sys
import threading
import time
import types as python_types
import warnings
import weakref
import numpy as np
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import keras_export
_GLOBAL_CUSTOM_OBJECTS = {}
_GLOBAL_CUSTOM_NAMES = {}
# Flag that determines whether to skip the NotImplementedError when calling
# get_config in custom models and layers. This is only enabled when saving to
# SavedModel, when the config isn't required.
_SKIP_FAILED_SERIALIZATION = False
# If a layer does not have a defined config, then the returned config will be a
# dictionary with the below key.
_LAYER_UNDEFINED_CONFIG_KEY = 'layer was saved without config'
@keras_export('keras.utils.custom_object_scope', # pylint: disable=g-classes-have-attributes
'keras.utils.CustomObjectScope')
class CustomObjectScope(object):
"""Exposes custom classes/functions to Keras deserialization internals.
Under a scope `with custom_object_scope(objects_dict)`, Keras methods such
as `tf.keras.models.load_model` or `tf.keras.models.model_from_config`
will be able to deserialize any custom object referenced by a
saved config (e.g. a custom layer or metric).
Example:
Consider a custom regularizer `my_regularizer`:
```python
layer = Dense(3, kernel_regularizer=my_regularizer)
config = layer.get_config() # Config contains a reference to `my_regularizer`
...
# Later:
with custom_object_scope({'my_regularizer': my_regularizer}):
layer = Dense.from_config(config)
```
Args:
*args: Dictionary or dictionaries of `{name: object}` pairs.
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
@keras_export('keras.utils.get_custom_objects')
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access the current collection of custom objects.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
# Store a unique, per-object ID for shared objects.
#
# We store a unique ID for each object so that we may, at loading time,
# re-create the network properly. Without this ID, we would have no way of
# determining whether a config is a description of a new object that
# should be created or is merely a reference to an already-created object.
SHARED_OBJECT_KEY = 'shared_object_id'
SHARED_OBJECT_DISABLED = threading.local()
SHARED_OBJECT_LOADING = threading.local()
SHARED_OBJECT_SAVING = threading.local()
# Attributes on the threadlocal variable must be set per-thread, thus we
# cannot initialize these globally. Instead, we have accessor functions with
# default values.
def _shared_object_disabled():
"""Get whether shared object handling is disabled in a threadsafe manner."""
return getattr(SHARED_OBJECT_DISABLED, 'disabled', False)
def _shared_object_loading_scope():
"""Get the current shared object saving scope in a threadsafe manner."""
return getattr(SHARED_OBJECT_LOADING, 'scope', NoopLoadingScope())
def _shared_object_saving_scope():
"""Get the current shared object saving scope in a threadsafe manner."""
return getattr(SHARED_OBJECT_SAVING, 'scope', None)
class DisableSharedObjectScope(object):
"""A context manager for disabling handling of shared objects.
Disables shared object handling for both saving and loading.
Created primarily for use with `clone_model`, which does extra surgery that
is incompatible with shared objects.
"""
def __enter__(self):
SHARED_OBJECT_DISABLED.disabled = True
self._orig_loading_scope = _shared_object_loading_scope()
self._orig_saving_scope = _shared_object_saving_scope()
def __exit__(self, *args, **kwargs):
SHARED_OBJECT_DISABLED.disabled = False
SHARED_OBJECT_LOADING.scope = self._orig_loading_scope
SHARED_OBJECT_SAVING.scope = self._orig_saving_scope
class NoopLoadingScope(object):
"""The default shared object loading scope. It does nothing.
Created to simplify serialization code that doesn't care about shared objects
(e.g. when serializing a single object).
"""
def get(self, unused_object_id):
return None
def set(self, object_id, obj):
pass
class SharedObjectLoadingScope(object):
"""A context manager for keeping track of loaded objects.
During the deserialization process, we may come across objects that are
shared across multiple layers. In order to accurately restore the network
structure to its original state, `SharedObjectLoadingScope` allows us to
re-use shared objects rather than cloning them.
"""
def __enter__(self):
if _shared_object_disabled():
return NoopLoadingScope()
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = self
self._obj_ids_to_obj = {}
return self
def get(self, object_id):
"""Given a shared object ID, returns a previously instantiated object.
Args:
object_id: shared object ID to use when attempting to find already-loaded
object.
Returns:
The object, if we've seen this ID before. Else, `None`.
"""
# Explicitly check for `None` internally to make external calling code a
# bit cleaner.
if object_id is None:
return
return self._obj_ids_to_obj.get(object_id)
def set(self, object_id, obj):
"""Stores an instantiated object for future lookup and sharing."""
if object_id is None:
return
self._obj_ids_to_obj[object_id] = obj
def __exit__(self, *args, **kwargs):
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = NoopLoadingScope()
class SharedObjectConfig(dict):
"""A configuration container that keeps track of references.
`SharedObjectConfig` will automatically attach a shared object ID to any
configs which are referenced more than once, allowing for proper shared
object reconstruction at load time.
In most cases, it would be more proper to subclass something like
`collections.UserDict` or `collections.Mapping` rather than `dict` directly.
Unfortunately, python's json encoder does not support `Mapping`s. This is
important functionality to retain, since we are dealing with serialization.
We should be safe to subclass `dict` here, since we aren't actually
overriding any core methods, only augmenting with a new one for reference
counting.
"""
def __init__(self, base_config, object_id, **kwargs):
self.ref_count = 1
self.object_id = object_id
super(SharedObjectConfig, self).__init__(base_config, **kwargs)
def increment_ref_count(self):
# As soon as we've seen the object more than once, we want to attach the
# shared object ID. This allows us to only attach the shared object ID when
# it's strictly necessary, making backwards compatibility breakage less
# likely.
if self.ref_count == 1:
self[SHARED_OBJECT_KEY] = self.object_id
self.ref_count += 1
class SharedObjectSavingScope(object):
"""Keeps track of shared object configs when serializing."""
def __enter__(self):
if _shared_object_disabled():
return None
global SHARED_OBJECT_SAVING
# Serialization can happen at a number of layers for a number of reasons.
# We may end up with a case where we're opening a saving scope within
# another saving scope. In that case, we'd like to use the outermost scope
# available and ignore inner scopes, since there is not (yet) a reasonable
# use case for having these nested and distinct.
if _shared_object_saving_scope() is not None:
self._passthrough = True
return _shared_object_saving_scope()
else:
self._passthrough = False
SHARED_OBJECT_SAVING.scope = self
self._shared_objects_config = weakref.WeakKeyDictionary()
self._next_id = 0
return self
def get_config(self, obj):
"""Gets a `SharedObjectConfig` if one has already been seen for `obj`.
Args:
obj: The object for which to retrieve the `SharedObjectConfig`.
Returns:
The SharedObjectConfig for a given object, if already seen. Else,
`None`.
"""
try:
shared_object_config = self._shared_objects_config[obj]
except (TypeError, KeyError):
# If the object is unhashable (e.g. a subclass of `AbstractBaseClass`
# that has not overridden `__hash__`), a `TypeError` will be thrown.
# We'll just continue on without shared object support.
return None
shared_object_config.increment_ref_count()
return shared_object_config
def create_config(self, base_config, obj):
"""Create a new SharedObjectConfig for a given object."""
shared_object_config = SharedObjectConfig(base_config, self._next_id)
self._next_id += 1
try:
self._shared_objects_config[obj] = shared_object_config
except TypeError:
# If the object is unhashable (e.g. a subclass of `AbstractBaseClass`
# that has not overridden `__hash__`), a `TypeError` will be thrown.
# We'll just continue on without shared object support.
pass
return shared_object_config
def __exit__(self, *args, **kwargs):
if not getattr(self, '_passthrough', False):
global SHARED_OBJECT_SAVING
SHARED_OBJECT_SAVING.scope = None
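# Illustrative sketch (added for clarity, not part of the public API): how the
# saving scope de-duplicates configs when the same object is serialized twice.
# `_SharedThing` stands in for e.g. a layer shared between two models.
def _example_shared_object_saving():
  class _SharedThing(object):
    def get_config(self):
      return {'units': 4}
  shared = _SharedThing()
  with SharedObjectSavingScope():
    first = serialize_keras_object(shared)   # fresh SharedObjectConfig
    second = serialize_keras_object(shared)  # same config, ref count bumped
  # After the second call both configs carry a 'shared_object_id' key that
  # lets deserialization re-use a single instance.
  return first, second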
def serialize_keras_class_and_config(
cls_name, cls_config, obj=None, shared_object_id=None):
"""Returns the serialization of the class with the given config."""
base_config = {'class_name': cls_name, 'config': cls_config}
# We call `serialize_keras_class_and_config` for some branches of the load
# path. In that case, we may already have a shared object ID we'd like to
# retain.
if shared_object_id is not None:
base_config[SHARED_OBJECT_KEY] = shared_object_id
# If we have an active `SharedObjectSavingScope`, check whether we've already
# serialized this config. If so, just use that config. This will store an
# extra ID field in the config, allowing us to re-create the shared object
# relationship at load time.
if _shared_object_saving_scope() is not None and obj is not None:
shared_object_config = _shared_object_saving_scope().get_config(obj)
if shared_object_config is None:
return _shared_object_saving_scope().create_config(base_config, obj)
return shared_object_config
return base_config
@keras_export('keras.utils.register_keras_serializable')
def register_keras_serializable(package='Custom', name=None):
"""Registers an object with the Keras serialization framework.
This decorator injects the decorated class or function into the Keras custom
object dictionary, so that it can be serialized and deserialized without
needing an entry in the user-provided custom object dict. It also injects a
function that Keras will call to get the object's serializable string key.
Note that to be serialized and deserialized, classes must implement the
`get_config()` method. Functions do not have this requirement.
  The object will be registered under the key 'package>name' where `name`
defaults to the object name if not passed.
Args:
package: The package that this class belongs to.
name: The name to serialize this class under in this package. If None, the
class' name will be used.
Returns:
A decorator that registers the decorated class with the passed names.
"""
def decorator(arg):
"""Registers a class with the Keras serialization framework."""
class_name = name if name is not None else arg.__name__
registered_name = package + '>' + class_name
if tf_inspect.isclass(arg) and not hasattr(arg, 'get_config'):
raise ValueError(
'Cannot register a class that does not have a get_config() method.')
if registered_name in _GLOBAL_CUSTOM_OBJECTS:
raise ValueError(
'%s has already been registered to %s' %
(registered_name, _GLOBAL_CUSTOM_OBJECTS[registered_name]))
if arg in _GLOBAL_CUSTOM_NAMES:
raise ValueError('%s has already been registered to %s' %
(arg, _GLOBAL_CUSTOM_NAMES[arg]))
_GLOBAL_CUSTOM_OBJECTS[registered_name] = arg
_GLOBAL_CUSTOM_NAMES[arg] = registered_name
return arg
return decorator
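# Illustrative sketch (added for clarity, not part of the public module): registering
# a user-defined class. The package and class names below are hypothetical.
def _example_register_custom_object():
  @register_keras_serializable(package='MyPackage')
  class MyScaler(object):
    def __init__(self, factor=2.0):
      self.factor = factor
    def get_config(self):
      return {'factor': self.factor}
  # The class is now reachable from its string key:
  #   get_registered_name(MyScaler)                -> 'MyPackage>MyScaler'
  #   get_registered_object('MyPackage>MyScaler')  -> MyScaler
  return get_registered_name(MyScaler)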
@keras_export('keras.utils.get_registered_name')
def get_registered_name(obj):
"""Returns the name registered to an object within the Keras framework.
This function is part of the Keras serialization and deserialization
framework. It maps objects to the string names associated with those objects
for serialization/deserialization.
Args:
obj: The object to look up.
Returns:
The name associated with the object, or the default Python name if the
object is not registered.
"""
if obj in _GLOBAL_CUSTOM_NAMES:
return _GLOBAL_CUSTOM_NAMES[obj]
else:
return obj.__name__
@tf_contextlib.contextmanager
def skip_failed_serialization():
global _SKIP_FAILED_SERIALIZATION
prev = _SKIP_FAILED_SERIALIZATION
try:
_SKIP_FAILED_SERIALIZATION = True
yield
finally:
_SKIP_FAILED_SERIALIZATION = prev
@keras_export('keras.utils.get_registered_object')
def get_registered_object(name, custom_objects=None, module_objects=None):
"""Returns the class associated with `name` if it is registered with Keras.
This function is part of the Keras serialization and deserialization
framework. It maps strings to the objects associated with them for
serialization/deserialization.
Example:
```
def from_config(cls, config, custom_objects=None):
if 'my_custom_object_name' in config:
config['hidden_cls'] = tf.keras.utils.get_registered_object(
config['my_custom_object_name'], custom_objects=custom_objects)
```
Args:
name: The name to look up.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, custom_objects is provided by the user.
module_objects: A dictionary of custom objects to look the name up in.
Generally, module_objects is provided by midlevel library implementers.
Returns:
An instantiable class associated with 'name', or None if no such class
exists.
"""
if name in _GLOBAL_CUSTOM_OBJECTS:
return _GLOBAL_CUSTOM_OBJECTS[name]
elif custom_objects and name in custom_objects:
return custom_objects[name]
elif module_objects and name in module_objects:
return module_objects[name]
return None
# pylint: disable=g-bad-exception-name
class CustomMaskWarning(Warning):
pass
# pylint: enable=g-bad-exception-name
@keras_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
"""Serialize a Keras object into a JSON-compatible representation.
Calls to `serialize_keras_object` while underneath the
`SharedObjectSavingScope` context manager will cause any objects re-used
across multiple layers to be saved with a special shared object ID. This
allows the network to be re-created properly during deserialization.
Args:
instance: The object to serialize.
Returns:
A dict-like, JSON-compatible representation of the object's config.
"""
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
# pylint: disable=protected-access
#
# For v1 layers, checking supports_masking is not enough. We have to also
# check whether compute_mask has been overridden.
supports_masking = (getattr(instance, 'supports_masking', False)
or (hasattr(instance, 'compute_mask')
and not is_default(instance.compute_mask)))
if supports_masking and is_default(instance.get_config):
warnings.warn('Custom mask layers require a config and must override '
'get_config. When loading, the custom mask layer must be '
'passed to the custom_objects argument.',
category=CustomMaskWarning)
# pylint: enable=protected-access
if hasattr(instance, 'get_config'):
name = get_registered_name(instance.__class__)
try:
config = instance.get_config()
except NotImplementedError as e:
if _SKIP_FAILED_SERIALIZATION:
return serialize_keras_class_and_config(
name, {_LAYER_UNDEFINED_CONFIG_KEY: True})
raise e
serialization_config = {}
for key, item in config.items():
if isinstance(item, str):
serialization_config[key] = item
continue
# Any object of a different type needs to be converted to string or dict
# for serialization (e.g. custom functions, custom classes)
try:
serialized_item = serialize_keras_object(item)
if isinstance(serialized_item, dict) and not isinstance(item, dict):
serialized_item['__passive_serialization__'] = True
serialization_config[key] = serialized_item
except ValueError:
serialization_config[key] = item
name = get_registered_name(instance.__class__)
return serialize_keras_class_and_config(
name, serialization_config, instance)
if hasattr(instance, '__name__'):
return get_registered_name(instance)
raise ValueError('Cannot serialize', instance)
def get_custom_objects_by_name(item, custom_objects=None):
"""Returns the item if it is in either local or global custom objects."""
if item in _GLOBAL_CUSTOM_OBJECTS:
return _GLOBAL_CUSTOM_OBJECTS[item]
elif custom_objects and item in custom_objects:
return custom_objects[item]
return None
def class_and_config_for_serialized_keras_object(
config,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Returns the class name and config for a serialized keras object."""
if (not isinstance(config, dict)
or 'class_name' not in config
or 'config' not in config):
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
cls = get_registered_object(class_name, custom_objects, module_objects)
if cls is None:
raise ValueError(
'Unknown {}: {}. Please ensure this object is '
'passed to the `custom_objects` argument. See '
'https://www.tensorflow.org/guide/keras/save_and_serialize'
'#registering_the_custom_object for details.'
.format(printable_module_name, class_name))
cls_config = config['config']
# Check if `cls_config` is a list. If it is a list, return the class and the
# associated class configs for recursively deserialization. This case will
# happen on the old version of sequential model (e.g. `keras_version` ==
# "2.0.6"), which is serialized in a different structure, for example
# "{'class_name': 'Sequential',
# 'config': [{'class_name': 'Embedding', 'config': ...}, {}, ...]}".
if isinstance(cls_config, list):
return (cls, cls_config)
deserialized_objects = {}
for key, item in cls_config.items():
if key == 'name':
# Assume that the value of 'name' is a string that should not be
# deserialized as a function. This avoids the corner case where
# cls_config['name'] has an identical name to a custom function and
# gets converted into that function.
deserialized_objects[key] = item
elif isinstance(item, dict) and '__passive_serialization__' in item:
deserialized_objects[key] = deserialize_keras_object(
item,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='config_item')
# TODO(momernick): Should this also have 'module_objects'?
elif (isinstance(item, str) and
tf_inspect.isfunction(get_registered_object(item, custom_objects))):
# Handle custom functions here. When saving functions, we only save the
# function's name as a string. If we find a matching string in the custom
# objects during deserialization, we convert the string back to the
# original function.
# Note that a potential issue is that a string field could have a naming
# conflict with a custom function name, but this should be a rare case.
# This issue does not occur if a string field has a naming conflict with
# a custom object, since the config of an object will always be a dict.
deserialized_objects[key] = get_registered_object(item, custom_objects)
for key, item in deserialized_objects.items():
cls_config[key] = deserialized_objects[key]
return (cls, cls_config)
@keras_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Turns the serialized form of a Keras object back into an actual object.
This function is for mid-level library implementers rather than end users.
Importantly, this utility requires you to provide the dict of `module_objects`
to use for looking up the object config; this is not populated by default.
If you need a deserialization utility that has preexisting knowledge of
built-in Keras objects, use e.g. `keras.layers.deserialize(config)`,
`keras.metrics.deserialize(config)`, etc.
Calling `deserialize_keras_object` while underneath the
`SharedObjectLoadingScope` context manager will cause any already-seen shared
objects to be returned as-is rather than creating a new object.
Args:
identifier: the serialized form of the object.
module_objects: A dictionary of built-in objects to look the name up in.
Generally, `module_objects` is provided by midlevel library implementers.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, `custom_objects` is provided by the end user.
printable_module_name: A human-readable string representing the type of the
object. Printed in case of exception.
Returns:
The deserialized object.
Example:
A mid-level library implementer might want to implement a utility for
retrieving an object from its config, as such:
```python
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
identifier,
module_objects=globals(),
custom_objects=custom_objects,
name="MyObjectType",
)
```
This is how e.g. `keras.layers.deserialize()` is implemented.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
(cls, cls_config) = class_and_config_for_serialized_keras_object(
config, module_objects, custom_objects, printable_module_name)
# If this object has already been loaded (i.e. it's shared between multiple
# objects), return the already-loaded object.
shared_object_id = config.get(SHARED_OBJECT_KEY)
shared_object = _shared_object_loading_scope().get(shared_object_id) # pylint: disable=assignment-from-none
if shared_object is not None:
return shared_object
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
deserialized_obj = cls.from_config(
cls_config,
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
else:
with CustomObjectScope(custom_objects):
deserialized_obj = cls.from_config(cls_config)
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
deserialized_obj = cls(**cls_config)
# Add object to shared objects, in case we find it referenced again.
_shared_object_loading_scope().set(shared_object_id, deserialized_obj)
return deserialized_obj
elif isinstance(identifier, str):
object_name = identifier
if custom_objects and object_name in custom_objects:
obj = custom_objects.get(object_name)
elif object_name in _GLOBAL_CUSTOM_OBJECTS:
obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
else:
obj = module_objects.get(object_name)
if obj is None:
raise ValueError(
'Unknown {}: {}. Please ensure this object is '
'passed to the `custom_objects` argument. See '
'https://www.tensorflow.org/guide/keras/save_and_serialize'
'#registering_the_custom_object for details.'
.format(printable_module_name, object_name))
# Classes passed by name are instantiated with no args, functions are
# returned as-is.
if tf_inspect.isclass(obj):
return obj()
return obj
elif tf_inspect.isfunction(identifier):
# If a function has already been deserialized, return as is.
return identifier
else:
raise ValueError('Could not interpret serialized %s: %s' %
(printable_module_name, identifier))
def func_dump(func):
"""Serializes a user defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == 'nt':
raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/')
code = codecs.encode(raw_code, 'base64').decode('ascii')
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Args:
value: Any value that needs to be casted to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
# pylint: disable=pointless-statement
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
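# Illustrative sketch (added for clarity): a round trip through func_dump/func_load.
def _example_func_roundtrip():
  code, defaults, closure = func_dump(lambda x: x + 1)
  restored = func_load(code, defaults=defaults, closure=closure)
  return restored(41)  # -> 42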
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
Args:
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name` but the
function accepts a `**kwargs` argument.
Returns:
bool, whether `fn` accepts a `name` keyword argument.
"""
arg_spec = tf_inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return name in arg_spec.args or name in arg_spec.kwonlyargs
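# Illustrative sketch (added for clarity): `f` below is a hypothetical callable.
def _example_has_arg():
  def f(x, scale=1.0, **kwargs):
    return x * scale
  # -> (True, False, True)
  return (has_arg(f, 'scale'),
          has_arg(f, 'shift'),
          has_arg(f, 'shift', accept_all=True))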
@keras_export('keras.utils.Progbar')
class Progbar(object):
"""Displays a progress bar.
Args:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that should *not* be
averaged over time. Metrics in this list will be displayed as-is. All
others will be averaged by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
unit_name: Display name for step counts (usually "step" or "sample").
"""
def __init__(self,
target,
width=30,
verbose=1,
interval=0.05,
stateful_metrics=None,
unit_name='step'):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules or
'PYCHARM_HOSTED' in os.environ)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
self._time_after_first_step = None
def update(self, current, values=None, finalize=None):
"""Updates the progress bar.
Args:
current: Index of current step.
values: List of tuples: `(name, value_for_last_step)`. If `name` is in
`stateful_metrics`, `value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
finalize: Whether this is the last update for the progress bar. If
`None`, defaults to `current >= self.target`.
"""
if finalize is None:
if self.target is None:
finalize = False
else:
finalize = current >= self.target
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
        # In the case that the progress bar doesn't have a target value in the first
# epoch, both on_batch_end and on_epoch_end will be called, which will
# cause 'current' and 'self._seen_so_far' to have the same value. Force
# the minimal value to 1 here, otherwise stateful_metric will be 0s.
value_base = max(current - self._seen_so_far, 1)
if k not in self._values:
self._values[k] = [v * value_base, value_base]
else:
self._values[k][0] += v * value_base
self._values[k][1] += value_base
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if now - self._last_update < self.interval and not finalize:
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.log10(self.target)) + 1
bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
time_per_unit = self._estimate_step_duration(current, now)
if self.target is None or finalize:
if time_per_unit >= 1 or time_per_unit == 0:
info += ' %.0fs/%s' % (time_per_unit, self.unit_name)
elif time_per_unit >= 1e-3:
info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)
else:
info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)
else:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60, eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if finalize:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if finalize:
numdigits = int(np.log10(self.target)) + 1
count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
info = count + info
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def _estimate_step_duration(self, current, now):
"""Estimate the duration of a single step.
Given the step number `current` and the corresponding time `now`
this function returns an estimate for how long a single step
takes. If this is called before one step has been completed
(i.e. `current == 0`) then zero is given as an estimate. The duration
estimate ignores the duration of the (assumed to be non-representative)
first step for estimates when more steps are available (i.e. `current>1`).
Args:
current: Index of current step.
now: The current time.
Returns: Estimate of the duration of a single step.
"""
if current:
# there are a few special scenarios here:
# 1) somebody is calling the progress bar without ever supplying step 1
      # 2) somebody is calling the progress bar and supplies step one multiple
# times, e.g. as part of a finalizing call
# in these cases, we just fall back to the simple calculation
if self._time_after_first_step is not None and current > 1:
time_per_unit = (now - self._time_after_first_step) / (current - 1)
else:
time_per_unit = (now - self._start) / current
if current == 1:
self._time_after_first_step = now
return time_per_unit
else:
return 0
def _update_stateful_metrics(self, stateful_metrics):
self.stateful_metrics = self.stateful_metrics.union(stateful_metrics)
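# Illustrative sketch (added for clarity, not part of the class): driving a Progbar
# by hand with placeholder metric values.
def _example_progbar_usage():
  bar = Progbar(target=100, stateful_metrics=['lr'])
  for step in range(100):
    bar.update(step + 1, values=[('loss', 1.0 / (step + 1)), ('lr', 1e-3)])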
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Args:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
def slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Args:
arrays: Single array or list of arrays.
start: can be an integer index (start index) or a list/array of indices
stop: integer (stop index); should be None if `start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
"""
if arrays is None:
return [None]
if isinstance(start, list) and stop is not None:
raise ValueError('The stop argument has to be None if the value of start '
'is a list.')
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
return [
None if x is None else
None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays
]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
if hasattr(start, '__getitem__'):
return arrays[start:stop]
return [None]
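# Illustrative sketch (added for clarity): pairing make_batches with slice_arrays
# on dummy data.
def _example_batching():
  data = np.arange(10)
  batches = make_batches(10, 4)  # [(0, 4), (4, 8), (8, 10)]
  return [slice_arrays([data], start, stop)[0] for start, stop in batches]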
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Args:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
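# Example usage (illustrative, class names hypothetical):
#
#   to_snake_case('MyDenseLayer')      # -> 'my_dense_layer'
#   to_snake_case('Conv2DTranspose')   # -> 'conv2d_transpose'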
def is_all_none(structure):
iterable = nest.flatten(structure)
# We cannot use Python's `any` because the iterable may return Tensors.
for element in iterable:
if element is not None:
return False
return True
def check_for_unexpected_keys(name, input_dict, expected_values):
unknown = set(input_dict.keys()).difference(expected_values)
if unknown:
raise ValueError('Unknown entries in {} dictionary: {}. Only expected '
'following keys: {}'.format(name, list(unknown),
expected_values))
def validate_kwargs(kwargs,
allowed_kwargs,
error_message='Keyword argument not understood:'):
"""Checks that all keyword arguments are in the set of allowed keys."""
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError(error_message, kwarg)
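# Example usage (illustrative, argument names hypothetical): unknown keyword
# arguments are rejected with a TypeError.
#
#   validate_kwargs({'trainable': True}, allowed_kwargs={'trainable', 'dtype'})   # passes
#   validate_kwargs({'trainble': True}, allowed_kwargs={'trainable', 'dtype'})    # raises TypeError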
def validate_config(config):
"""Determines whether config appears to be a valid layer config."""
return isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True # pylint: disable=protected-access
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
return getattr(method, '_is_default', False)
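# Illustrative sketch (class names hypothetical): `default` marks a base-class
# method, and `is_default` reports whether a subclass overrode it (an override
# drops the `_is_default` marker).
#
#   class Base(object):
#       @default
#       def build(self):
#           pass
#
#   class Child(Base):
#       def build(self):
#           pass
#
#   is_default(Base.build)    # -> True
#   is_default(Child.build)   # -> False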
def populate_dict_with_module_objects(target_dict, modules, obj_filter):
for module in modules:
for name in dir(module):
obj = getattr(module, name)
if obj_filter(obj):
target_dict[name] = obj
class LazyLoader(python_types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies."""
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(LazyLoader, self).__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
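# Example usage (illustrative, module name hypothetical): the heavy module is
# imported only on first attribute access, at which point it also replaces the
# placeholder in the parent's globals.
#
#   np_lazy = LazyLoader('np_lazy', globals(), 'numpy')
#   # nothing imported yet
#   np_lazy.zeros(3)   # first lookup triggers importlib.import_module('numpy')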
# Aliases
custom_object_scope = CustomObjectScope # pylint: disable=invalid-name
| apache-2.0 | -8,389,404,340,543,426,000 | 34.242475 | 112 | 0.668185 | false |
darktears/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py | 35 | 18382 | # Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import operator
import re
import urllib
import urllib2
import webkitpy.common.config.urls as config_urls
from webkitpy.common.memoized import memoized
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.networktransaction import NetworkTransaction
from webkitpy.common.system.logutils import get_logger
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
_log = get_logger(__file__)
class Builder(object):
def __init__(self, name, buildbot):
self._name = name
self._buildbot = buildbot
self._builds_cache = {}
self._revision_to_build_number = None
def name(self):
return self._name
def results_url(self):
return config_urls.chromium_results_url_base_for_builder(self._name)
def accumulated_results_url(self):
return config_urls.chromium_accumulated_results_url_base_for_builder(self._name)
def latest_layout_test_results_url(self):
return self.accumulated_results_url() or self.latest_cached_build().results_url()
@memoized
def latest_layout_test_results(self):
return self.fetch_layout_test_results(self.latest_layout_test_results_url())
def _fetch_file_from_results(self, results_url, file_name):
# It seems this can return None if the url redirects and then returns 404.
result = urllib2.urlopen("%s/%s" % (results_url, file_name))
if not result:
return None
# urlopen returns a file-like object which sometimes works fine with str()
# but sometimes is an addinfourl object. In either case calling read() is correct.
return result.read()
def fetch_layout_test_results(self, results_url):
# FIXME: This should cache that the result was a 404 and stop hitting the network.
results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "failing_results.json"))
return LayoutTestResults.results_from_string(results_file)
def url_encoded_name(self):
return urllib.quote(self._name)
def url(self):
return "%s/builders/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
# This provides a single place to mock
def _fetch_build(self, build_number):
build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number)
if not build_dictionary:
return None
revision_string = build_dictionary['sourceStamp']['revision']
return Build(self,
build_number=int(build_dictionary['number']),
# 'revision' may be None if a trunk build was started by the force-build button on the web page.
revision=(int(revision_string) if revision_string else None),
# Buildbot uses any number other than 0 to mean fail. Since we fetch with
# filter=1, passing builds may contain no 'results' value.
is_green=(not build_dictionary.get('results')),
)
def build(self, build_number):
if not build_number:
return None
cached_build = self._builds_cache.get(build_number)
if cached_build:
return cached_build
build = self._fetch_build(build_number)
self._builds_cache[build_number] = build
return build
def latest_cached_build(self):
revision_build_pairs = self.revision_build_pairs_with_results()
revision_build_pairs.sort(key=lambda i: i[1])
latest_build_number = revision_build_pairs[-1][1]
return self.build(latest_build_number)
file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)")
def _revision_and_build_for_filename(self, filename):
# Example: "r47483 (1)/" or "r47483 (1).zip"
match = self.file_name_regexp.match(filename)
if not match:
return None
return (int(match.group("revision")), int(match.group("build_number")))
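# Example (illustrative): results directories on the server encode the revision
# and the build number in their names.
#
#   builder._revision_and_build_for_filename("r47483 (1).zip")   # -> (47483, 1)
#   builder._revision_and_build_for_filename("stale-results/")   # -> None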
def _fetch_revision_to_build_map(self):
# All _fetch requests go through _buildbot for easier mocking
# FIXME: This should use NetworkTransaction's 404 handling instead.
try:
# FIXME: This method is horribly slow due to the huge network load.
# FIXME: This is a poor way to do revision -> build mapping.
# Better would be to ask buildbot through some sort of API.
print "Loading revision/build list from %s." % self.results_url()
print "This may take a while..."
result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url())
except urllib2.HTTPError, error:
if error.code != 404:
raise
_log.debug("Revision/build list failed to load.")
result_files = []
return dict(self._file_info_list_to_revision_to_build_list(result_files))
def _file_info_list_to_revision_to_build_list(self, file_info_list):
# This assumes there was only one build per revision, which is false but we don't care for now.
revisions_and_builds = []
for file_info in file_info_list:
revision_and_build = self._revision_and_build_for_filename(file_info["filename"])
if revision_and_build:
revisions_and_builds.append(revision_and_build)
return revisions_and_builds
def _revision_to_build_map(self):
if not self._revision_to_build_number:
self._revision_to_build_number = self._fetch_revision_to_build_map()
return self._revision_to_build_number
def revision_build_pairs_with_results(self):
return self._revision_to_build_map().items()
# This assumes there can be only one build per revision, which is false, but we don't care for now.
def build_for_revision(self, revision, allow_failed_lookups=False):
# NOTE: This lookup will fail if that exact revision was never built.
build_number = self._revision_to_build_map().get(int(revision))
if not build_number:
return None
build = self.build(build_number)
if not build and allow_failed_lookups:
# Builds for old revisions will fail to look up via buildbot's json api.
build = Build(self,
build_number=build_number,
revision=revision,
is_green=False,
)
return build
class Build(object):
def __init__(self, builder, build_number, revision, is_green):
self._builder = builder
self._number = build_number
self._revision = revision
self._is_green = is_green
@staticmethod
def build_url(builder, build_number):
return "%s/builds/%s" % (builder.url(), build_number)
def url(self):
return self.build_url(self.builder(), self._number)
def results_url(self):
results_directory = "r%s (%s)" % (self.revision(), self._number)
return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory))
def results_zip_url(self):
return "%s.zip" % self.results_url()
def builder(self):
return self._builder
def revision(self):
return self._revision
def is_green(self):
return self._is_green
def previous_build(self):
# previous_build() allows callers to avoid assuming build numbers are sequential.
# They may not be sequential across all master changes, or when non-trunk builds are made.
return self._builder.build(self._number - 1)
class BuildBot(object):
_builder_factory = Builder
_default_url = config_urls.chromium_buildbot_url
def __init__(self, url=None):
self.buildbot_url = url if url else self._default_url
self._builder_by_name = {}
def _parse_last_build_cell(self, builder, cell):
status_link = cell.find('a')
if status_link:
# Will be either a revision number or a build number
revision_string = status_link.string
# If revision_string has non-digits assume it's not a revision number.
builder['built_revision'] = int(revision_string) \
if not re.match('\D', revision_string) \
else None
# FIXME: We treat a lost slave as green even though it is not, to
# work around the Qt bot being on a broken internet connection.
# The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
builder['is_green'] = not re.search('fail', cell.renderContents()) or \
not not re.search('lost', cell.renderContents())
status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
link_match = re.match(status_link_regexp, status_link['href'])
builder['build_number'] = int(link_match.group("build_number"))
else:
# We failed to find a link in the first cell, just give up. This
# can happen if a builder is just-added, the first cell will just
# be "no build"
# Other parts of the code depend on is_green being present.
builder['is_green'] = False
builder['built_revision'] = None
builder['build_number'] = None
def _parse_current_build_cell(self, builder, cell):
activity_lines = cell.renderContents().split("<br />")
builder["activity"] = activity_lines[0] # normally "building" or "idle"
# The middle lines document how long left for any current builds.
match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1])
builder["pending_builds"] = int(match.group("pending_builds")) if match else 0
def _parse_builder_status_from_row(self, status_row):
status_cells = status_row.findAll('td')
builder = {}
# First cell is the name
name_link = status_cells[0].find('a')
builder["name"] = unicode(name_link.string)
self._parse_last_build_cell(builder, status_cells[1])
self._parse_current_build_cell(builder, status_cells[2])
return builder
def _matches_regexps(self, builder_name, name_regexps):
for name_regexp in name_regexps:
if re.match(name_regexp, builder_name):
return True
return False
# FIXME: These _fetch methods should move to a networking class.
def _fetch_build_dictionary(self, builder, build_number):
# Note: filter=1 will remove None and {} and '', which cuts noise but can
# cause keys to be missing which you might otherwise expect.
# FIXME: The bot sends a *huge* amount of data for each request, we should
# find a way to reduce the response size further.
json_url = "%s/json/builders/%s/builds/%s?filter=1" % (self.buildbot_url, urllib.quote(builder.name()), build_number)
try:
return json.load(urllib2.urlopen(json_url))
except urllib2.URLError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error fetching data for %s build %s (%s, json: %s): %s" % (builder.name(), build_number, build_url, json_url, err))
return None
except ValueError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error decoding json data from %s: %s" % (build_url, err))
return None
def _fetch_one_box_per_builder(self):
build_status_url = "%s/one_box_per_builder" % self.buildbot_url
return urllib2.urlopen(build_status_url)
def _file_cell_text(self, file_cell):
"""Traverses down through firstChild elements until one containing a string is found, then returns that string"""
element = file_cell
while element.string is None and element.contents:
element = element.contents[0]
return element.string
def _parse_twisted_file_row(self, file_row):
string_or_empty = lambda string: unicode(string) if string else u""
file_cells = file_row.findAll('td')
return {
"filename": string_or_empty(self._file_cell_text(file_cells[0])),
"size": string_or_empty(self._file_cell_text(file_cells[1])),
"type": string_or_empty(self._file_cell_text(file_cells[2])),
"encoding": string_or_empty(self._file_cell_text(file_cells[3])),
}
def _parse_twisted_directory_listing(self, page):
soup = BeautifulSoup(page)
# HACK: Match only table rows with a class to ignore twisted header/footer rows.
file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')})
return [self._parse_twisted_file_row(file_row) for file_row in file_rows]
# FIXME: There should be a better way to get this information directly from twisted.
def _fetch_twisted_directory_listing(self, url):
return self._parse_twisted_directory_listing(urllib2.urlopen(url))
def builders(self):
return [self.builder_with_name(status["name"]) for status in self.builder_statuses()]
# This method pulls from /one_box_per_builder as an efficient way to get information about all builders in a single request.
def builder_statuses(self):
soup = BeautifulSoup(self._fetch_one_box_per_builder())
return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')]
def builder_with_name(self, name):
builder = self._builder_by_name.get(name)
if not builder:
builder = self._builder_factory(name, self)
self._builder_by_name[name] = builder
return builder
# This makes fewer requests than calling Builder.latest_build would. It grabs all builder
# statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
def _latest_builds_from_builders(self):
builder_statuses = self.builder_statuses()
return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses]
def _build_at_or_before_revision(self, build, revision):
while build:
if build.revision() <= revision:
return build
build = build.previous_build()
def _fetch_builder_page(self, builder):
builder_page_url = "%s/builders/%s?numbuilds=100" % (self.buildbot_url, urllib2.quote(builder.name()))
return urllib2.urlopen(builder_page_url)
def _revisions_for_builder(self, builder):
soup = BeautifulSoup(self._fetch_builder_page(builder))
revisions = []
for status_row in soup.find('table').findAll('tr'):
revision_anchor = status_row.find('a')
table_cells = status_row.findAll('td')
if not table_cells or len(table_cells) < 3 or not table_cells[2].string:
continue
if revision_anchor and revision_anchor.string and re.match(r'^\d+$', revision_anchor.string):
revisions.append((int(revision_anchor.string), 'success' in table_cells[2].string))
return revisions
def _find_green_revision(self, builder_revisions):
revision_statuses = {}
for builder in builder_revisions:
for revision, succeeded in builder_revisions[builder]:
revision_statuses.setdefault(revision, set())
if succeeded and revision_statuses[revision] != None:
revision_statuses[revision].add(builder)
else:
revision_statuses[revision] = None
# In descending order, look for a revision X with successful builds
# Once we found X, check if remaining builders succeeded in the neighborhood of X.
revisions_in_order = sorted(revision_statuses.keys(), reverse=True)
for i, revision in enumerate(revisions_in_order):
if not revision_statuses[revision]:
continue
builders_succeeded_in_future = set()
for future_revision in sorted(revisions_in_order[:i + 1]):
if not revision_statuses[future_revision]:
break
builders_succeeded_in_future = builders_succeeded_in_future.union(revision_statuses[future_revision])
builders_succeeded_in_past = set()
for past_revision in revisions_in_order[i:]:
if not revision_statuses[past_revision]:
break
builders_succeeded_in_past = builders_succeeded_in_past.union(revision_statuses[past_revision])
if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
return revision
return None
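# Illustrative example (hypothetical data, `bb` a BuildBot instance): the newest
# revision at which every builder is known to have succeeded, possibly via a
# neighbouring revision, is returned.
#
#   builder_revisions = {
#       'Linux': [(105, True), (104, True), (103, True)],
#       'Mac':   [(105, False), (104, True), (103, True)],
#   }
#   bb._find_green_revision(builder_revisions)   # -> 104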
| bsd-3-clause | -411,272,952,947,862,340 | 44.955 | 147 | 0.645577 | false |