repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
Pcolar/dpn-python-library | create_ingest.py | 1 | 1478 | #!/usr/bin/python
""" Synchronize DPN Ingest Records """
from app.dpn_python_library import *
import json
import requests
import sys
# get access info from the environment
dpn_host, dpn_token = load_environment()
# log_message("DPN Host: "+dpn_host+" DPN Token: "+dpn_token)
token_string="Token token="+dpn_token
dpn_headers={'Content-Type': 'application/json','Accept': 'application/json', 'Authorization': token_string}
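# The resulting Authorization header has the form "Token token=<API token>";
# the token value itself comes from the environment loaded above.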
# Read synchronization record from stdin
input_record=sys.stdin.read().replace('\n', '')
if len(input_record) == 0:
log_message("Record required as input")
exit(1)
sync_record=json.loads(input_record)
# Querystring to drive retrieval from Target
dpn_api_endpoint="/api-v2/ingest"
dpn_querystring=dpn_api_endpoint
# log_message("Querystring " + dpn_querystring)
# Ingest records are immutable once created
# We use a post to attempt creation, if it fails
# 409 - Duplicate - OK
# 201 - Created
# xxx - fail
create_response=requests.post(dpn_host+dpn_querystring, headers=dpn_headers, data=input_record)
if create_response.status_code == 201:
log_message("Created Ingest record for bag: "+sync_record['bag'])
else:
if create_response.status_code == 409:
# log_message("Duplicate Ingest record " + str(create_response.status_code)+" Bag UUID: "+ str(sync_record['bag']))
pass
else:
log_message("Ingest record create failed " + str(sync_record['ingest_id']) + " Status: " + str(create_response.status_code))
exit(0)
| bsd-3-clause | -2,562,262,990,243,185,700 | 33.372093 | 132 | 0.716509 | false |
syci/l10n-spain | l10n_es_vat_book/models/l10n_es_vat_book_invoice_tax_lines.py | 5 | 1993 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2017 Praxya (http://praxya.com/)
# Daniel Rodriguez Lijo <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class L10nEsVatBookInvoiceTaxLines(models.Model):
_name = 'l10n.es.vat.book.invoice.tax.lines'
name = fields.Char(
string="Tax Name")
tax_percent = fields.Float(
string="Tax percent")
tax_amount = fields.Float(
string="Tax amount")
amount_without_tax = fields.Float(
string="Base")
issued_invoice_line_id = fields.Many2one(
comodel_name='l10n.es.vat.book.issued.lines',
string="Issued invoice line")
received_invoice_line_id = fields.Many2one(
comodel_name='l10n.es.vat.book.received.lines',
string="Received invoice line")
rectification_issued_invoice_line_id = fields.Many2one(
comodel_name='l10n.es.vat.book.rectification.issued.lines',
string="Received invoice line")
rectification_received_invoice_line_id = fields.Many2one(
comodel_name='l10n.es.vat.book.rectification.received.lines',
string="Received invoice line")
| agpl-3.0 | -8,056,622,840,544,424,000 | 36.603774 | 78 | 0.628199 | false |
antoviaque/edx-platform | common/djangoapps/microsite_configuration/microsite.py | 41 | 4893 | """
This file implements the Microsite support for the Open edX platform.
A microsite enables the following features:
1) Mapping of sub-domain name to a 'brand', e.g. foo-university.edx.org
2) Present a landing page with a listing of courses that are specific to the 'brand'
3) Ability to swap out some branding elements in the website
"""
import inspect
from importlib import import_module
from django.conf import settings
from microsite_configuration.backends.base import BaseMicrositeBackend, BaseMicrositeTemplateBackend
__all__ = [
'is_request_in_microsite', 'get_value', 'has_override_value',
'get_template_path', 'get_value_for_org', 'get_all_orgs',
'clear', 'set_by_domain', 'enable_microsites', 'get_all_config',
'is_feature_enabled', 'enable_microsites_pre_startup',
]
BACKEND = None
TEMPLATES_BACKEND = None
def is_feature_enabled():
"""
Returns whether the feature flag to enable microsite has been set
"""
return settings.FEATURES.get('USE_MICROSITES', False)
def is_request_in_microsite():
"""
This will return if current request is a request within a microsite
"""
return BACKEND.is_request_in_microsite()
def get_value(val_name, default=None, **kwargs):
"""
Returns a value associated with the request's microsite, if present
"""
return BACKEND.get_value(val_name, default, **kwargs)
def get_dict(dict_name, default=None, **kwargs):
"""
Returns a dictionary product of merging the request's microsite and
the default value.
    This can be used, for example, to return a merged dictionary from the
settings.FEATURES dict, including values defined at the microsite
"""
return BACKEND.get_dict(dict_name, default, **kwargs)
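# A usage sketch for get_dict (the feature-flag name below is only
# illustrative, not a key this module defines):
#
#     features = get_dict('FEATURES', settings.FEATURES)
#     if features.get('SOME_FEATURE_FLAG'):
#         ...
#
# i.e. per-microsite overrides of individual FEATURES keys are merged on
# top of the platform-wide defaults.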
def has_override_value(val_name):
"""
Returns True/False whether a Microsite has a definition for the
specified named value
"""
return BACKEND.has_override_value(val_name)
def get_value_for_org(org, val_name, default=None):
"""
This returns a configuration value for a microsite which has an org_filter that matches
what is passed in
"""
return BACKEND.get_value_for_org(org, val_name, default)
def get_all_orgs():
"""
This returns a set of orgs that are considered within a microsite. This can be used,
for example, to do filtering
"""
return BACKEND.get_all_orgs()
def get_all_config():
"""
    This returns a dict containing all microsite configs. Each key in the dict
    represents a microsite config.
"""
return BACKEND.get_all_config()
def clear():
"""
Clears out any microsite configuration from the current request/thread
"""
BACKEND.clear()
def set_by_domain(domain):
"""
For a given request domain, find a match in our microsite configuration
and make it available to the complete django request process
"""
BACKEND.set_config_by_domain(domain)
def enable_microsites_pre_startup(log):
"""
Prepare the feature settings that must be enabled before django.setup() or
autostartup() during the startup script
"""
if is_feature_enabled():
BACKEND.enable_microsites_pre_startup(log)
def enable_microsites(log):
"""
Enable the use of microsites during the startup script
"""
if is_feature_enabled():
BACKEND.enable_microsites(log)
def get_template(uri):
"""
Returns a template for the specified URI, None if none exists or if caller should
use default templates/search paths
"""
if not is_request_in_microsite():
return
return TEMPLATES_BACKEND.get_template(uri)
def get_template_path(relative_path, **kwargs):
"""
Returns a path (string) to a template
"""
if not is_request_in_microsite():
return relative_path
return TEMPLATES_BACKEND.get_template_path(relative_path, **kwargs)
def get_backend(name, expected_base_class, **kwds):
"""
Load a microsites backend and return an instance of it.
If backend is None (default) settings.MICROSITE_BACKEND is used.
Any additional args(kwds) will be used in the constructor of the backend.
"""
if not name:
return None
try:
parts = name.split('.')
module_name = '.'.join(parts[:-1])
class_name = parts[-1]
except IndexError:
raise ValueError('Invalid microsites backend %s' % name)
try:
module = import_module(module_name)
cls = getattr(module, class_name)
if not inspect.isclass(cls) or not issubclass(cls, expected_base_class):
raise TypeError
except (AttributeError, ValueError):
raise ValueError('Cannot find microsites backend %s' % module_name)
return cls(**kwds)
BACKEND = get_backend(settings.MICROSITE_BACKEND, BaseMicrositeBackend)
TEMPLATES_BACKEND = get_backend(settings.MICROSITE_TEMPLATE_BACKEND, BaseMicrositeTemplateBackend)
| agpl-3.0 | -779,269,672,445,813,800 | 27.614035 | 100 | 0.68833 | false |
Ademan/python-xlib-branch | utils/parsexbug.py | 3 | 7194 | #!/usr/bin/python
import sys
import os
import pprint
import struct
# Change path so we find Xlib
sys.path.insert(1, os.path.join(sys.path[0], '..'))
def dummy_buffer(str, x, y = sys.maxint):
return str[x:y]
__builtins__.buffer = dummy_buffer
from Xlib.protocol import display, request, rq, event
from Xlib import error
# We don't want any fancy dictwrapper, just plain mappings
rq.DictWrapper = lambda x: x
class BugFile:
def __init__(self, file):
self.file = file
self.cbuf = self.sbuf = ''
def read_client(self, bytes):
while len(self.cbuf) < bytes and self.file:
self.read_next()
d = self.cbuf[:bytes]
self.cbuf = self.cbuf[bytes:]
return d
def read_server(self, bytes):
while len(self.sbuf) < bytes and self.file:
self.read_next()
d = self.sbuf[:bytes]
self.sbuf = self.sbuf[bytes:]
return d
def read_next(self):
line = self.file.readline()
if line == '':
self.file = None
return
src = line[0]
length = int(line[1:-1])
data = self.file.read(length)
if src == 'C':
self.cbuf = self.cbuf + data
elif src == 'S':
self.sbuf = self.sbuf + data
else:
raise ValueError('Bad control line: %s' % line)
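# Input format assumed by BugFile (a sketch inferred from read_next above;
# the byte counts are illustrative): the capture is a sequence of records,
# each a control line 'C<count>\n' or 'S<count>\n' -- client or server
# direction plus a decimal byte count -- immediately followed by exactly
# <count> raw protocol bytes. For example 'C12\n' followed by 12 bytes of
# client data, then 'S32\n' followed by 32 bytes of server data, and so on.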
class ParseString:
def __init__(self, datafunc):
self.get_data = datafunc
self.data = ''
def __getitem__(self, i):
if i < 0:
raise ValueError('bad string index: %d' % i)
if len(self.data) <= i:
if not self.get_data:
raise RuntimeError('attempt to allocate more data after returning a new ParseString')
self.data = self.data + self.get_data(i - len(self.data) + 1)
return self.data[i]
def __getslice__(self, i, j):
if j == sys.maxint:
if self.get_data:
ps = ParseString(self.get_data)
self.get_data = None
return ps
else:
raise RuntimeError('attempt to allocate another ParseString')
if i < 0 or j < 0 or i > j:
raise ValueError('bad slice indices: [%d:%d]' % (i, j))
if len(self.data) < j:
if not self.get_data:
raise RuntimeError('attempt to allocate more data after returning a new ParseString')
self.data = self.data + self.get_data(j - len(self.data))
return self.data[i:j]
class DummyDisplay:
def get_resource_class(self, name):
return None
class ParseXbug:
def __init__(self, infile = sys.stdin, outfile = sys.stdout):
bf = BugFile(infile)
self.cdata = ParseString(bf.read_client)
sdata = ParseString(bf.read_server)
self.outfile = outfile
self.xpprint = pprint.PrettyPrinter(indent = 2, stream = outfile)
self.disp = DummyDisplay()
# Parse client setup request
r, self.cdata = display.ConnectionSetupRequest._request.parse_binary(self.cdata, self.disp)
self.print_xbug('request', 'ConnectionSetup', r)
# Parse server reply
r, sdata = display.ConnectionSetupRequest._reply.parse_binary(sdata, self.disp)
extra = r['additional_length'] * 4
del r['additional_length']
extradata = sdata[:extra]
sdata = sdata[extra:]
if r['status'] == 0:
r['reason'] = extradata[:r['reason_length']]
del r['status']
del r['reason_length']
self.print_xbug('error', 'ConnectionSetup', r)
return
elif r['status'] == 1:
r2, d = display.ConnectionSetupRequest._success_reply.parse_binary(extradata, self.disp)
del r['status']
del r['reason_length']
r.update(r2)
del r2
self.print_xbug('reply', 'ConnectionSetup', r)
else:
raise ValueError('bad connection setup reply status: %d' % r['status'])
self.last_serial = 0
self.last_request = None
while 1:
# Get next server item, always at least 32 bytes
d = sdata[:32]
if len(d) != 32:
# Print out remaining requests
try:
self.get_requests(sys.maxint)
except ValueError:
pass
return
sdata = sdata[32:]
# Check type
t = ord(d[0])
# Error
if t == 0:
# Code is second byte
code = ord(d[1])
# Fetch error class
estruct = error.xerror_class.get(code, error.XError)
r, d = estruct._fields.parse_binary(d, self.disp)
del r['type']
self.get_requests(r['sequence_number'])
self.print_xbug('error', estruct.__name__, r)
# Reply
elif t == 1:
# Get sequence number, and read corresponding request
sno = struct.unpack('=H', d[2:4])[0]
self.get_requests(sno)
# Get entire reply length
rlen = int(struct.unpack('=L', d[4:8])[0]) * 4
d = d + sdata[:rlen]
sdata = sdata[rlen:]
if self.last_request:
r, d = self.last_request._reply.parse_binary(d, self.disp)
self.print_xbug('reply', self.last_request.__name__, r)
else:
self.print_xbug('reply', 'Unknown',
{ 'sequence_number': sno })
# Some event
else:
estruct = event.event_class.get(t, event.AnyEvent)
r, d = estruct._fields.parse_binary(d, self.disp)
self.get_requests(r['sequence_number'])
self.print_xbug('event', estruct.__name__, r)
def get_requests(self, serial):
# Get request length
while self.last_serial < serial:
d = self.cdata[2:4]
if len(d) != 2:
raise ValueError('client request missing')
rlen = struct.unpack('=H', d)[0] * 4
d = self.cdata[:rlen]
if len(d) != rlen:
raise ValueError('client request missing')
self.cdata = self.cdata[rlen:]
opcode = ord(d[0])
self.last_request = request.major_codes.get(opcode)
self.last_serial = self.last_serial + 1
if self.last_request:
r, d = self.last_request._request.parse_binary(d, self.disp)
r['sequence_number'] = self.last_serial
self.print_xbug('request', self.last_request.__name__, r)
else:
self.print_xbug('request', 'Unknown (%d)' % opcode,
{ 'sequence_number': self.last_serial })
def print_xbug(self, rtype, name, data):
self.outfile.write('%-8s %s\n' % (rtype + ':', name))
self.xpprint.pprint(data)
self.outfile.write('\n')
if __name__ == '__main__':
ParseXbug()
| gpl-2.0 | 7,969,402,834,485,398,000 | 28.850622 | 101 | 0.516959 | false |
snower/TorMySQL | tormysql/platform/tornado.py | 1 | 13912 | # -*- coding: utf-8 -*-
# 17/12/8
# create by: snower
from __future__ import absolute_import, division, print_function
import sys
import socket
import errno
from tornado.iostream import IOStream as BaseIOStream, SSLIOStream as BaseSSLIOStream, StreamClosedError, _ERRNO_WOULDBLOCK, ssl, ssl_wrap_socket, _client_ssl_defaults
from tornado.concurrent import Future
from tornado.gen import coroutine
from tornado.ioloop import IOLoop
if sys.version_info[0] >= 3:
import io
StringIO = io.BytesIO
else:
import cStringIO
StringIO = cStringIO.StringIO
def current_ioloop():
return IOLoop.current()
class IOStream(BaseIOStream):
_read_callback = None
_write_callback = None
_connect_callback = None
_pending_callbacks = None
def __init__(self, address, bind_address, socket=None, *args, **kwargs):
if socket is None:
socket = self.init_socket(address, bind_address)
super(IOStream, self).__init__(socket, *args, **kwargs)
self._write_buffer = bytearray()
self._write_buffer_pos = 0
self._write_buffer_size = 0
def init_socket(self, address, bind_address):
if not isinstance(address, tuple):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
if bind_address is not None:
sock.bind((bind_address, 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return sock
def connect(self, address, connect_timeout = 0, server_hostname = None):
future = Future()
if connect_timeout:
def timeout():
self._loop_connect_timeout = None
if not self._connecting:
self.close((None, IOError("Connect timeout"), None))
self._loop_connect_timeout = self.io_loop.call_later(connect_timeout, timeout)
def connected(connect_future):
if self._loop_connect_timeout:
self.io_loop.remove_timeout(self._loop_connect_timeout)
self._loop_connect_timeout = None
if (hasattr(connect_future, "_exc_info") and connect_future._exc_info is not None) \
or (hasattr(connect_future, "_exception") and connect_future._exception is not None):
future.set_exception(connect_future.exception())
else:
future.set_result(connect_future.result())
connect_future = super(IOStream, self).connect(address, server_hostname=server_hostname)
connect_future.add_done_callback(connected)
return future
def _handle_events(self, fd, events):
if self._closed:
return
try:
if self._connecting:
self._handle_connect()
if self._closed:
return
if events & self.io_loop.READ:
self._handle_read()
if self._closed:
return
if events & self.io_loop.WRITE:
self._handle_write()
if self._closed:
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
self.io_loop.add_callback(self.close)
return
except Exception:
self.close(exc_info=True)
raise
def _handle_connect(self):
super(IOStream, self)._handle_connect()
if not self.closed():
self._state = self.io_loop.ERROR | self.io_loop.READ
if self._write_buffer:
self._state = self._state | self.io_loop.WRITE
self.io_loop.update_handler(self.fileno(), self._state)
def _handle_read(self):
chunk = True
while True:
try:
chunk = self.socket.recv(self.read_chunk_size)
if not chunk:
break
if self._read_buffer_size:
self._read_buffer += chunk
else:
self._read_buffer = bytearray(chunk)
self._read_buffer_size += len(chunk)
except (socket.error, IOError, OSError) as e:
en = e.errno if hasattr(e, 'errno') else e.args[0]
if en in _ERRNO_WOULDBLOCK:
break
if en == errno.EINTR:
continue
self.close(exc_info=True)
return
if self._read_future is not None and self._read_buffer_size >= self._read_bytes:
future, self._read_future = self._read_future, None
self._read_buffer, data = bytearray(), self._read_buffer
self._read_buffer_size = 0
self._read_bytes = 0
future.set_result(data)
if not chunk:
self.close()
return
def read(self, num_bytes):
assert self._read_future is None, "Already reading"
if self._closed:
raise StreamClosedError(real_error=self.error)
future = self._read_future = Future()
self._read_bytes = num_bytes
self._read_partial = False
if self._read_buffer_size >= self._read_bytes:
future, self._read_future = self._read_future, None
self._read_buffer, data = bytearray(), self._read_buffer
self._read_buffer_size = 0
self._read_bytes = 0
future.set_result(data)
return future
read_bytes = read
def _handle_write(self):
try:
num_bytes = self.socket.send(memoryview(self._write_buffer)[self._write_buffer_pos: self._write_buffer_pos + self._write_buffer_size])
self._write_buffer_pos += num_bytes
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
en = e.errno if hasattr(e, 'errno') else e.args[0]
if en not in _ERRNO_WOULDBLOCK:
self.close(exc_info=True)
return
if not self._write_buffer_size:
if self._write_buffer_pos > 0:
self._write_buffer = bytearray()
self._write_buffer_pos = 0
if self._state & self.io_loop.WRITE:
self._state = self._state & ~self.io_loop.WRITE
self.io_loop.update_handler(self.fileno(), self._state)
def write(self, data):
assert isinstance(data, (bytes, bytearray))
if self._closed:
raise StreamClosedError(real_error=self.error)
if data:
if self._write_buffer_size:
self._write_buffer += data
else:
self._write_buffer = bytearray(data)
self._write_buffer_size += len(data)
if not self._connecting:
self._handle_write()
if self._write_buffer_size:
if not self._state & self.io_loop.WRITE:
self._state = self._state | self.io_loop.WRITE
self.io_loop.update_handler(self.fileno(), self._state)
def start_tls(self, server_side, ssl_options=None, server_hostname=None, connect_timeout=None):
if (self._read_callback or self._read_future or
self._write_callback or self._write_futures or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
ssl_options = _client_ssl_defaults
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options,
server_hostname=server_hostname,
server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = Future()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
# Note that unlike most Futures returned by IOStream,
# this one passes the underlying error through directly
# instead of wrapping everything in a StreamClosedError
# with a real_error attribute. This is because once the
# connection is established it's more helpful to raise
# the SSLError directly than to hide it behind a
# StreamClosedError (and the client is expecting SSL
# issues rather than network issues since this method is
# named start_tls).
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
if connect_timeout:
def timeout():
ssl_stream._loop_connect_timeout = None
if not future.done():
ssl_stream.close((None, IOError("Connect timeout"), None))
ssl_stream._loop_connect_timeout = self.io_loop.call_later(connect_timeout, timeout)
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
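    # Usage sketch for start_tls (hostnames and paths are hypothetical;
    # assumes the plain stream is connected and idle, as required above):
    #
    #     ssl_stream = yield stream.start_tls(
    #         server_side=False,
    #         ssl_options={"ca_certs": "/path/to/ca.pem"},
    #         server_hostname="db.example.com",
    #         connect_timeout=5)
    #
    # The returned Future resolves to a new SSLIOStream once the handshake
    # finishes; the original stream should no longer be used after that.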
class SSLIOStream(IOStream, BaseSSLIOStream):
def __init__(self, socket, *args, **kwargs):
self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults)
IOStream.__init__(self, None, None, socket, *args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._loop_connect_timeout = None
self._server_hostname = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
chunk = True
while True:
try:
chunk = self.socket.recv(self.read_chunk_size)
if not chunk:
break
if self._read_buffer_size:
self._read_buffer += chunk
else:
self._read_buffer = bytearray(chunk)
self._read_buffer_size += len(chunk)
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
break
self.close(exc_info=True)
return
except (socket.error, IOError, OSError) as e:
en = e.errno if hasattr(e, 'errno') else e.args[0]
if en in _ERRNO_WOULDBLOCK:
break
if en == errno.EINTR:
continue
self.close(exc_info=True)
return
if self._read_future is not None and self._read_buffer_size >= self._read_bytes:
future, self._read_future = self._read_future, None
self._read_buffer, data = bytearray(), self._read_buffer
self._read_buffer_size = 0
self._read_bytes = 0
future.set_result(data)
if not chunk:
self.close()
return
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
try:
num_bytes = self.socket.send(memoryview(self._write_buffer)[
self._write_buffer_pos: self._write_buffer_pos + self._write_buffer_size])
self._write_buffer_pos += num_bytes
self._write_buffer_size -= num_bytes
except ssl.SSLError as e:
if e.args[0] != ssl.SSL_ERROR_WANT_WRITE:
self.close(exc_info=True)
return
except (socket.error, IOError, OSError) as e:
en = e.errno if hasattr(e, 'errno') else e.args[0]
if en not in _ERRNO_WOULDBLOCK:
self.close(exc_info=True)
return
if not self._write_buffer_size:
if self._write_buffer_pos > 0:
self._write_buffer = bytearray()
self._write_buffer_pos = 0
if self._state & self.io_loop.WRITE:
self._state = self._state & ~self.io_loop.WRITE
self.io_loop.update_handler(self.fileno(), self._state)
def _run_ssl_connect_callback(self):
if self._state & self.io_loop.WRITE:
self._state = self._state & ~self.io_loop.WRITE
self.io_loop.update_handler(self.fileno(), self._state)
if hasattr(BaseSSLIOStream, "_finish_ssl_connect"):
BaseSSLIOStream._finish_ssl_connect(self)
else:
BaseSSLIOStream._run_ssl_connect_callback(self)
def makefile(self, mode):
        return self
| mit | -7,511,479,114,017,912,000 | 36.704607 | 167 | 0.559876 | false |
cmsdaq/hltd | lib/cx_Oracle-7.1/test/Features12_1.py | 1 | 18985 | #------------------------------------------------------------------------------
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
"""Module for testing features introduced in 12.1"""
import TestEnv
import cx_Oracle
import datetime
# import sys
# if sys.version_info > (3,):
# long = int
class TestCase(TestEnv.BaseTestCase):
def testArrayDMLRowCountsOff(self):
"test executing with arraydmlrowcounts mode disabled"
self.cursor.execute("truncate table TestArrayDML")
rows = [ (1, "First"),
(2, "Second") ]
sql = "insert into TestArrayDML (IntCol,StringCol) values (:1,:2)"
self.cursor.executemany(sql, rows, arraydmlrowcounts = False)
self.assertRaises(cx_Oracle.DatabaseError,
self.cursor.getarraydmlrowcounts)
rows = [ (3, "Third"),
(4, "Fourth") ]
self.cursor.executemany(sql, rows)
self.assertRaises(cx_Oracle.DatabaseError,
self.cursor.getarraydmlrowcounts)
def testArrayDMLRowCountsOn(self):
"test executing with arraydmlrowcounts mode enabled"
self.cursor.execute("truncate table TestArrayDML")
rows = [ ( 1, "First", 100),
( 2, "Second", 200),
( 3, "Third", 300),
( 4, "Fourth", 300),
( 5, "Fifth", 300) ]
sql = "insert into TestArrayDML (IntCol,StringCol,IntCol2) " \
"values (:1,:2,:3)"
self.cursor.executemany(sql, rows, arraydmlrowcounts = True)
self.connection.commit()
self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1, 1, 1, 1])
self.cursor.execute("select count(*) from TestArrayDML")
count, = self.cursor.fetchone()
self.assertEqual(count, len(rows))
def testBindPLSQLBooleanCollectionIn(self):
"test binding a boolean collection (in)"
typeObj = self.connection.gettype("PKG_TESTBOOLEANS.UDT_BOOLEANLIST")
obj = typeObj.newobject()
obj.setelement(1, True)
obj.extend([True, False, True, True, False, True])
result = self.cursor.callfunc("pkg_TestBooleans.TestInArrays", int,
(obj,))
self.assertEqual(result, 5)
def testBindPLSQLBooleanCollectionOut(self):
"test binding a boolean collection (out)"
typeObj = self.connection.gettype("PKG_TESTBOOLEANS.UDT_BOOLEANLIST")
obj = typeObj.newobject()
self.cursor.callproc("pkg_TestBooleans.TestOutArrays", (6, obj))
self.assertEqual(obj.aslist(), [True, False, True, False, True, False])
def testBindPLSQLDateCollectionIn(self):
"test binding a PL/SQL date collection (in)"
typeObj = self.connection.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST")
obj = typeObj.newobject()
obj.setelement(1, datetime.datetime(2016, 2, 5))
obj.append(datetime.datetime(2016, 2, 8, 12, 15, 30))
obj.append(datetime.datetime(2016, 2, 12, 5, 44, 30))
result = self.cursor.callfunc("pkg_TestDateArrays.TestInArrays",
cx_Oracle.NUMBER, (2, datetime.datetime(2016, 2, 1), obj))
self.assertEqual(result, 24.75)
def testBindPLSQLDateCollectionInOut(self):
"test binding a PL/SQL date collection (in/out)"
typeObj = self.connection.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST")
obj = typeObj.newobject()
obj.setelement(1, datetime.datetime(2016, 1, 1))
obj.append(datetime.datetime(2016, 1, 7))
obj.append(datetime.datetime(2016, 1, 13))
obj.append(datetime.datetime(2016, 1, 19))
self.cursor.callproc("pkg_TestDateArrays.TestInOutArrays", (4, obj))
self.assertEqual(obj.aslist(),
[datetime.datetime(2016, 1, 8),
datetime.datetime(2016, 1, 14),
datetime.datetime(2016, 1, 20),
datetime.datetime(2016, 1, 26)])
def testBindPLSQLDateCollectionOut(self):
"test binding a PL/SQL date collection (out)"
typeObj = self.connection.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST")
obj = typeObj.newobject()
self.cursor.callproc("pkg_TestDateArrays.TestOutArrays", (3, obj))
self.assertEqual(obj.aslist(),
[datetime.datetime(2002, 12, 13, 4, 48),
datetime.datetime(2002, 12, 14, 9, 36),
datetime.datetime(2002, 12, 15, 14, 24)])
def testBindPLSQLNumberCollectionIn(self):
"test binding a PL/SQL number collection (in)"
typeObj = self.connection.gettype("PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST")
obj = typeObj.newobject()
obj.setelement(1, 10)
obj.extend([20, 30, 40, 50])
result = self.cursor.callfunc("pkg_TestNumberArrays.TestInArrays", int,
(5, obj))
self.assertEqual(result, 155)
def testBindPLSQLNumberCollectionInOut(self):
"test binding a PL/SQL number collection (in/out)"
typeObj = self.connection.gettype("PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST")
obj = typeObj.newobject()
obj.setelement(1, 5)
obj.extend([8, 3, 2])
self.cursor.callproc("pkg_TestNumberArrays.TestInOutArrays", (4, obj))
self.assertEqual(obj.aslist(), [50, 80, 30, 20])
def testBindPLSQLNumberCollectionOut(self):
"test binding a PL/SQL number collection (out)"
typeObj = self.connection.gettype("PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST")
obj = typeObj.newobject()
self.cursor.callproc("pkg_TestNumberArrays.TestOutArrays", (3, obj))
self.assertEqual(obj.aslist(), [100, 200, 300])
def testBindPLSQLRecordArray(self):
"test binding an array of PL/SQL records (in)"
recType = self.connection.gettype("PKG_TESTRECORDS.UDT_RECORD")
arrayType = self.connection.gettype("PKG_TESTRECORDS.UDT_RECORDARRAY")
arrayObj = arrayType.newobject()
for i in range(3):
obj = recType.newobject()
obj.NUMBERVALUE = i + 1
obj.STRINGVALUE = "String in record #%d" % (i + 1)
obj.DATEVALUE = datetime.datetime(2017, i + 1, 1)
obj.TIMESTAMPVALUE = datetime.datetime(2017, 1, i + 1)
obj.BOOLEANVALUE = (i % 2) == 1
arrayObj.append(obj)
result = self.cursor.callfunc("pkg_TestRecords.TestInArrays", str,
(arrayObj,))
self.assertEqual(result,
"udt_Record(1, 'String in record #1', " \
"to_date('2017-01-01', 'YYYY-MM-DD'), " \
"to_timestamp('2017-01-01 00:00:00', " \
"'YYYY-MM-DD HH24:MI:SS'), false); " \
"udt_Record(2, 'String in record #2', " \
"to_date('2017-02-01', 'YYYY-MM-DD'), " \
"to_timestamp('2017-01-02 00:00:00', " \
"'YYYY-MM-DD HH24:MI:SS'), true); " \
"udt_Record(3, 'String in record #3', " \
"to_date('2017-03-01', 'YYYY-MM-DD'), " \
"to_timestamp('2017-01-03 00:00:00', " \
"'YYYY-MM-DD HH24:MI:SS'), false)")
def testBindPLSQLRecordIn(self):
"test binding a PL/SQL record (in)"
typeObj = self.connection.gettype("PKG_TESTRECORDS.UDT_RECORD")
obj = typeObj.newobject()
obj.NUMBERVALUE = 18
obj.STRINGVALUE = "A string in a record"
obj.DATEVALUE = datetime.datetime(2016, 2, 15)
obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 12, 14, 25, 36)
obj.BOOLEANVALUE = False
result = self.cursor.callfunc("pkg_TestRecords.GetStringRep", str,
(obj,))
self.assertEqual(result,
"udt_Record(18, 'A string in a record', " \
"to_date('2016-02-15', 'YYYY-MM-DD'), " \
"to_timestamp('2016-02-12 14:25:36', " \
"'YYYY-MM-DD HH24:MI:SS'), false)")
def testBindPLSQLRecordOut(self):
"test binding a PL/SQL record (out)"
typeObj = self.connection.gettype("PKG_TESTRECORDS.UDT_RECORD")
obj = typeObj.newobject()
obj.NUMBERVALUE = 5
obj.STRINGVALUE = "Test value"
obj.DATEVALUE = datetime.datetime.today()
obj.TIMESTAMPVALUE = datetime.datetime.today()
obj.BOOLEANVALUE = False
self.cursor.callproc("pkg_TestRecords.TestOut", (obj,))
self.assertEqual(obj.NUMBERVALUE, 25)
self.assertEqual(obj.STRINGVALUE, "String in record")
self.assertEqual(obj.DATEVALUE, datetime.datetime(2016, 2, 16))
self.assertEqual(obj.TIMESTAMPVALUE,
datetime.datetime(2016, 2, 16, 18, 23, 55))
self.assertEqual(obj.BOOLEANVALUE, True)
def testBindPLSQLStringCollectionIn(self):
"test binding a PL/SQL string collection (in)"
typeObj = self.connection.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST")
obj = typeObj.newobject()
obj.setelement(1, "First element")
obj.setelement(2, "Second element")
obj.setelement(3, "Third element")
result = self.cursor.callfunc("pkg_TestStringArrays.TestInArrays", int,
(5, obj))
self.assertEqual(result, 45)
def testBindPLSQLStringCollectionInOut(self):
"test binding a PL/SQL string collection (in/out)"
typeObj = self.connection.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST")
obj = typeObj.newobject()
obj.setelement(1, "The first element")
obj.append("The second element")
obj.append("The third and final element")
self.cursor.callproc("pkg_TestStringArrays.TestInOutArrays", (3, obj))
self.assertEqual(obj.aslist(),
['Converted element # 1 originally had length 17',
'Converted element # 2 originally had length 18',
'Converted element # 3 originally had length 27'])
def testBindPLSQLStringCollectionOut(self):
"test binding a PL/SQL string collection (out)"
typeObj = self.connection.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST")
obj = typeObj.newobject()
self.cursor.callproc("pkg_TestStringArrays.TestOutArrays", (4, obj))
self.assertEqual(obj.aslist(),
['Test out element # 1',
'Test out element # 2',
'Test out element # 3',
'Test out element # 4'])
def testBindPLSQLStringCollectionOutWithHoles(self):
"test binding a PL/SQL string collection (out with holes)"
typeObj = self.connection.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST")
obj = typeObj.newobject()
self.cursor.callproc("pkg_TestStringArrays.TestIndexBy", (obj,))
self.assertEqual(obj.first(), -1048576)
self.assertEqual(obj.last(), 8388608)
self.assertEqual(obj.next(-576), 284)
self.assertEqual(obj.prev(284), -576)
self.assertEqual(obj.size(), 4)
self.assertEqual(obj.exists(-576), True)
self.assertEqual(obj.exists(-577), False)
self.assertEqual(obj.getelement(284), 'Third element')
self.assertEqual(obj.aslist(),
["First element", "Second element", "Third element",
"Fourth element"])
self.assertEqual(obj.asdict(),
{ -1048576 : 'First element',
-576 : 'Second element',
284 : 'Third element',
8388608: 'Fourth element' })
obj.delete(-576)
obj.delete(284)
self.assertEqual(obj.aslist(), ["First element", "Fourth element"])
self.assertEqual(obj.asdict(),
{ -1048576 : 'First element',
8388608: 'Fourth element' })
def testExceptionInIteration(self):
"test executing with arraydmlrowcounts with exception"
self.cursor.execute("truncate table TestArrayDML")
rows = [ (1, "First"),
(2, "Second"),
(2, "Third"),
(4, "Fourth") ]
sql = "insert into TestArrayDML (IntCol,StringCol) values (:1,:2)"
self.assertRaises(cx_Oracle.DatabaseError, self.cursor.executemany,
sql, rows, arraydmlrowcounts = True)
self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1])
def testExecutingDelete(self):
"test executing delete statement with arraydmlrowcount mode"
self.cursor.execute("truncate table TestArrayDML")
rows = [ (1, "First", 100),
(2, "Second", 200),
(3, "Third", 300),
(4, "Fourth", 300),
(5, "Fifth", 300),
(6, "Sixth", 400),
(7, "Seventh", 400),
(8, "Eighth", 500) ]
sql = "insert into TestArrayDML (IntCol,StringCol,IntCol2) " \
"values (:1, :2, :3)"
self.cursor.executemany(sql, rows)
rows = [ (200,), (300,), (400,) ]
statement = "delete from TestArrayDML where IntCol2 = :1"
self.cursor.executemany(statement, rows, arraydmlrowcounts = True)
self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 3, 2])
def testExecutingUpdate(self):
"test executing update statement with arraydmlrowcount mode"
self.cursor.execute("truncate table TestArrayDML")
rows = [ (1, "First",100),
(2, "Second",200),
(3, "Third",300),
(4, "Fourth",300),
(5, "Fifth",300),
(6, "Sixth",400),
(7, "Seventh",400),
(8, "Eighth",500) ]
sql = "insert into TestArrayDML (IntCol,StringCol,IntCol2) " \
"values (:1, :2, :3)"
self.cursor.executemany(sql, rows)
rows = [ ("One", 100),
("Two", 200),
("Three", 300),
("Four", 400) ]
sql = "update TestArrayDML set StringCol = :1 where IntCol2 = :2"
self.cursor.executemany(sql, rows, arraydmlrowcounts = True)
self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1, 3, 2])
def testImplicitResults(self):
"test getimplicitresults() returns the correct data"
self.cursor.execute("""
declare
c1 sys_refcursor;
c2 sys_refcursor;
begin
open c1 for
select NumberCol
from TestNumbers
where IntCol between 3 and 5;
dbms_sql.return_result(c1);
open c2 for
select NumberCol
from TestNumbers
where IntCol between 7 and 10;
dbms_sql.return_result(c2);
end;""")
results = self.cursor.getimplicitresults()
self.assertEqual(len(results), 2)
self.assertEqual([n for n, in results[0]], [3.75, 5, 6.25])
self.assertEqual([n for n, in results[1]], [8.75, 10, 11.25, 12.5])
def testImplicitResultsNoStatement(self):
"test getimplicitresults() without executing a statement"
self.assertRaises(cx_Oracle.InterfaceError,
self.cursor.getimplicitresults)
def testInsertWithBatchError(self):
"test executing insert with multiple distinct batch errors"
self.cursor.execute("truncate table TestArrayDML")
rows = [ (1, "First", 100),
(2, "Second", 200),
(2, "Third", 300),
(4, "Fourth", 400),
(5, "Fourth", 1000)]
sql = "insert into TestArrayDML (IntCol, StringCol, IntCol2) " \
"values (:1, :2, :3)"
self.cursor.executemany(sql, rows, batcherrors = True,
arraydmlrowcounts = True)
user = TestEnv.GetMainUser()
expectedErrors = [
( 4, 1438, "ORA-01438: value larger than specified " \
"precision allowed for this column" ),
( 2, 1, "ORA-00001: unique constraint " \
"(%s.TESTARRAYDML_PK) violated" % user.upper())
]
actualErrors = [(e.offset, e.code, e.message) \
for e in self.cursor.getbatcherrors()]
self.assertEqual(actualErrors, expectedErrors)
self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1, 0, 1, 0])
def testBatchErrorFalse(self):
"test batcherrors mode set to False"
self.cursor.execute("truncate table TestArrayDML")
rows = [ (1, "First", 100),
(2, "Second", 200),
(2, "Third", 300) ]
sql = "insert into TestArrayDML (IntCol, StringCol, IntCol2) " \
"values (:1, :2, :3)"
self.assertRaises(cx_Oracle.IntegrityError,
self.cursor.executemany, sql, rows, batcherrors = False)
def testUpdatewithBatchError(self):
"test executing in succession with batch error"
self.cursor.execute("truncate table TestArrayDML")
rows = [ (1, "First", 100),
(2, "Second", 200),
(3, "Third", 300),
(4, "Second", 300),
(5, "Fifth", 300),
(6, "Sixth", 400),
(6, "Seventh", 400),
(8, "Eighth", 100) ]
sql = "insert into TestArrayDML (IntCol, StringCol, IntCol2) " \
"values (:1, :2, :3)"
self.cursor.executemany(sql, rows, batcherrors = True)
user = TestEnv.GetMainUser()
expectedErrors = [
( 6, 1, "ORA-00001: unique constraint " \
"(%s.TESTARRAYDML_PK) violated" % user.upper())
]
actualErrors = [(e.offset, e.code, e.message) \
for e in self.cursor.getbatcherrors()]
self.assertEqual(actualErrors, expectedErrors)
rows = [ (101, "First"),
(201, "Second"),
(3000, "Third"),
(900, "Ninth"),
(301, "Third") ]
sql = "update TestArrayDML set IntCol2 = :1 where StringCol = :2"
self.cursor.executemany(sql, rows, arraydmlrowcounts = True,
batcherrors = True)
expectedErrors = [
( 2, 1438, "ORA-01438: value larger than specified " \
"precision allowed for this column" )
]
actualErrors = [(e.offset, e.code, e.message) \
for e in self.cursor.getbatcherrors()]
self.assertEqual(actualErrors, expectedErrors)
self.assertEqual(self.cursor.getarraydmlrowcounts(),
[1, 2, 0, 0, 1])
self.assertEqual(self.cursor.rowcount, 4)
if __name__ == "__main__":
TestEnv.RunTestCases()
| lgpl-3.0 | -8,089,385,629,371,357,000 | 43.775943 | 80 | 0.568659 | false |
AMOSoft/fabtools | fabtools/require/files.py | 1 | 7724 | """
Files and directories
=====================
This module provides high-level tools for managing files and
directories.
"""
from pipes import quote
from tempfile import mkstemp
from urlparse import urlparse
import hashlib
import os
from fabric.api import hide, put, run, settings
from fabtools.files import (
group as _group,
is_file,
is_dir,
md5sum,
mode as _mode,
owner as _owner,
umask,
)
from fabtools.utils import run_as_root
BLOCKSIZE = 2 ** 20 # 1MB
def directory(path, use_sudo=False, owner='', group='', mode=''):
"""
Require a directory to exist.
::
from fabtools import require
require.directory('/tmp/mydir', owner='alice', use_sudo=True)
.. note:: This function can be accessed directly from the
``fabtools.require`` module for convenience.
"""
func = use_sudo and run_as_root or run
if not is_dir(path):
func('mkdir -p "%(path)s"' % locals())
# Ensure correct owner
if (owner and _owner(path, use_sudo) != owner) or \
(group and _group(path, use_sudo) != group):
func('chown %(owner)s:%(group)s "%(path)s"' % locals())
# Ensure correct mode
if mode and _mode(path, use_sudo) != mode:
func('chmod %(mode)s "%(path)s"' % locals())
def directories(path_list, use_sudo=False, owner='', group='', mode=''):
"""
Require a list of directories to exist.
::
from fabtools import require
dirs=[
'/tmp/mydir',
'/tmp/mydear',
'/tmp/my/dir'
]
require.directories(dirs, owner='alice', mode='750')
.. note:: This function can be accessed directly from the
``fabtools.require`` module for convenience.
"""
for path in path_list:
directory(path, use_sudo, owner, group, mode)
def file(path=None, contents=None, source=None, url=None, md5=None,
use_sudo=False, owner=None, group='', mode=None, verify_remote=True,
temp_dir='/tmp'):
"""
Require a file to exist and have specific contents and properties.
You can provide either:
- *contents*: the required contents of the file::
from fabtools import require
require.file('/tmp/hello.txt', contents='Hello, world')
- *source*: the local path of a file to upload::
from fabtools import require
require.file('/tmp/hello.txt', source='files/hello.txt')
- *url*: the URL of a file to download (*path* is then optional)::
from fabric.api import cd
from fabtools import require
with cd('tmp'):
require.file(url='http://example.com/files/hello.txt')
If *verify_remote* is ``True`` (the default), then an MD5 comparison
will be used to check whether the remote file is the same as the
source. If this is ``False``, the file will be assumed to be the
same if it is present. This is useful for very large files, where
generating an MD5 sum may take a while.
When providing either the *contents* or the *source* parameter, Fabric's
``put`` function will be used to upload the file to the remote host.
When ``use_sudo`` is ``True``, the file will first be uploaded to a temporary
directory, then moved to its final location. The default temporary
directory is ``/tmp``, but can be overridden with the *temp_dir* parameter.
If *temp_dir* is an empty string, then the user's home directory will
be used.
If `use_sudo` is `True`, then the remote file will be owned by root,
and its mode will reflect root's default *umask*. The optional *owner*,
*group* and *mode* parameters can be used to override these properties.
.. note:: This function can be accessed directly from the
``fabtools.require`` module for convenience.
"""
func = use_sudo and run_as_root or run
# 1) Only a path is given
if path and not (contents or source or url):
assert path
if not is_file(path):
func('touch "%(path)s"' % locals())
# 2) A URL is specified (path is optional)
elif url:
if not path:
path = os.path.basename(urlparse(url).path)
if not is_file(path) or md5 and md5sum(path) != md5:
func('wget --progress=dot:mega %(url)s -O %(path)s' % locals())
# 3) A local filename, or a content string, is specified
else:
if source:
assert not contents
t = None
else:
fd, source = mkstemp()
t = os.fdopen(fd, 'w')
t.write(contents)
t.close()
if verify_remote:
# Avoid reading the whole file into memory at once
digest = hashlib.md5()
f = open(source, 'rb')
try:
while True:
d = f.read(BLOCKSIZE)
if not d:
break
digest.update(d)
finally:
f.close()
else:
digest = None
if (not is_file(path, use_sudo=use_sudo) or
(verify_remote and
md5sum(path, use_sudo=use_sudo) != digest.hexdigest())):
with settings(hide('running')):
put(source, path, use_sudo=use_sudo, temp_dir=temp_dir)
if t is not None:
os.unlink(source)
# Ensure correct owner
if use_sudo and owner is None:
owner = 'root'
if (owner and _owner(path, use_sudo) != owner) or \
(group and _group(path, use_sudo) != group):
func('chown %(owner)s:%(group)s "%(path)s"' % locals())
# Ensure correct mode
if use_sudo and mode is None:
mode = oct(0666 & ~int(umask(use_sudo=True), base=8))
if mode and _mode(path, use_sudo) != mode:
func('chmod %(mode)s "%(path)s"' % locals())
def template_file(path=None, template_contents=None, template_source=None,
context=None, **kwargs):
"""
Require a file whose contents is defined by a template.
"""
if template_contents is None:
with open(template_source) as template_file:
template_contents = template_file.read()
if context is None:
context = {}
file(path=path, contents=template_contents % context, **kwargs)
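# A usage sketch for template_file (path, template and context keys are
# hypothetical):
#
#     template_file('/etc/motd', template_source='motd.tpl',
#                   context={'hostname': 'web1'}, use_sudo=True)
#
# Note the classic %-interpolation above (contents = template % context),
# so the template itself should use %(hostname)s style placeholders.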
def temporary_directory(template=None):
"""
Require a temporary directory.
The directory is created using the ``mktemp`` command. It will
be created in ``/tmp``, unless the ``TMPDIR`` environment variable
is set to another location. ::
from fabtools.require.files import temporary_directory
tmp_dir = temporary_directory()
You can choose a specific location and name template for the
temporary directory: ::
from fabtools.require.files import temporary_directory
tmp_dir = temporary_directory('/var/tmp/temp.XXXXXX')
You can also call this function as a context manager. In this case,
the directory and its contents will be automatically deleted when
exiting the block: ::
from pipes import quote
from posixpath import join
from fabtools.require.files import temporary_directory
with temporary_directory() as tmp_dir:
path = join(tmp_dir, 'foo')
run('touch %s' % quote(path))
"""
options = ['--directory']
if template:
options.append(template)
options = ' '.join(options)
with hide('running', 'stdout'):
path = run('mktemp %s' % options)
return TemporaryDirectory(path)
class TemporaryDirectory(str):
def __enter__(self):
return self
def __exit__(self, type, value, tb):
run('rm -rf %s' % quote(self))
| bsd-2-clause | 4,745,734,662,193,207,000 | 28.59387 | 81 | 0.598912 | false |
jmanoel7/my_dot_files | .local/share/gedit/plugins/quickSwitcher/plugin.py | 1 | 4552 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Matevž Pogačar ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
import os
from gi.repository import GObject, Gdk, Gtk, Gedit, PeasGtk
from .dialog import QuickSwitchDialog
from .settings import QuickSwitchSettings
from xml.etree import ElementTree
width = None
height = None
color = None
position_type = None
position_x = None
position_y = None
class QuickSwitcher(GObject.Object, Gedit.WindowActivatable, PeasGtk.Configurable):
__gtype_name__ = "QuickSwitcher"
window = GObject.property(type=Gedit.Window)
default_width = 160
default_height = 190
default_color = "#f0f099"
default_position_type = 1
default_position_x = 0
default_position_y = 0
dialog = None
file_path = None
#Constructor. Initializes plugin on Gedit start.
def __init__(self):
GObject.Object.__init__(self)
self.file_path = os.path.expanduser('~') + "/.local/share/gedit/plugins/quickSwitcher/"
et = ElementTree.parse(self.file_path + "config.xml")
global width
global height
global color
global position_type
global position_x
global position_y
width = self.default_width
height = self.default_height
color = self.default_color
position_type = self.default_position_type
position_x = self.default_position_x
position_y = self.default_position_y
width_xml = et.find("width")
if not width_xml == None:
width = int(width_xml.text)
if width < 150:
width = 150
height_xml = et.find("height")
if not height_xml == None:
height = int(height_xml.text)
if height < 150:
height = 150
color_xml = et.find("color")
if not color_xml == None:
color = color_xml.text
position_xml = et.find("position")
if not position_xml == None:
position_type_xml = position_xml.find("type")
if not position_type_xml == None:
position_type = int(position_type_xml.text)
if position_type < 1 or position_type > 4:
position_type = 1
position_x_xml = position_xml.find("coordinatex")
if not position_x_xml == None:
position_x = int(position_x_xml.text)
position_y_xml = position_xml.find("coordinatey")
if not position_y_xml == None:
position_y = int(position_y_xml.text)
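	#Example of the config.xml layout this constructor reads (the values shown
	#are just the defaults above; the root element name is not checked here):
	#	<quickswitcher>
	#		<width>160</width>
	#		<height>190</height>
	#		<color>#f0f099</color>
	#		<position>
	#			<type>1</type>
	#			<coordinatex>0</coordinatex>
	#			<coordinatey>0</coordinatey>
	#		</position>
	#	</quickswitcher>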
def do_activate(self):
self.kpe_handler = self.window.connect('key-press-event', self.on_quick_switcher)
def do_deactivate(self):
pass
def do_update_state(self):
pass
#When user calls this plugin (<Ctrl>e is pressed or Tools->Quick Switch is pressed)
#Initializes class QuickSwitchDialog
def on_quick_switcher(self, widget, event):
defmod = Gtk.accelerator_get_default_mod_mask() & event.state
if event.keyval != 0x65 or defmod != Gdk.ModifierType.CONTROL_MASK:
return
tabs = self.get_tabs()
global width
global height
global color
global position_type
global position_x
global position_y
self.dialog = QuickSwitchDialog(self, tabs, width, height, color, position_type, position_x, position_y)
self.dialog.show_all()
#Returns all opened tabs.
def get_tabs(self):
tabs = []
for document in self.window.get_documents():
tabs.append(document.get_short_name_for_display())
return tabs
#When user calls configuration dialog (via Edit->Preferences->Plugins).
def do_create_configure_widget(self):
settingsGrid = QuickSwitchSettings()
settingsGrid.setSettings(self, self.file_path)
return settingsGrid.do_create_configure_widget()
#Function called from QuickSwitchSettings so all changes will be available right away (no need for restarting Gedit).
def setSettings(self, new_width, new_height, new_color, new_position_type, new_position_x, new_position_y):
global width
global height
global color
global position_type
global position_x
global position_y
width = new_width
height = new_height
color = new_color
position_type = new_position_type
position_x = new_position_x
position_y = new_position_y
| gpl-3.0 | 4,204,226,165,111,456,000 | 28.545455 | 118 | 0.722418 | false |
qwefi/nova | nova/api/ec2/cloud.py | 1 | 82484 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import time
from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.api import validator
from nova import availability_zones
from nova import block_device
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.image import s3
from nova import network
from nova.network.security_group import neutron_driver
from nova.objects import instance as instance_obj
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
ec2_opts = [
cfg.StrOpt('ec2_host',
default='$my_ip',
help='the ip of the ec2 api server'),
cfg.StrOpt('ec2_dmz_host',
default='$my_ip',
help='the internal ip of the ec2 api server'),
cfg.IntOpt('ec2_port',
default=8773,
help='the port of the ec2 api server'),
cfg.StrOpt('ec2_scheme',
default='http',
help='the protocol to use when connecting to the ec2 api '
'server (http, https)'),
cfg.StrOpt('ec2_path',
default='/services/Cloud',
help='the path prefix used to call the ec2 api server'),
cfg.ListOpt('region_list',
default=[],
help='list of region=fqdn pairs separated by commas'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib')
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def validate_ec2_id(val):
if not validator.validate_str()(val):
raise exception.InvalidInstanceIDMalformed(val=val)
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
raise exception.InvalidInstanceIDMalformed(val=val)
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
_STATE_DESCRIPTION_MAP = {
None: inst_state.PENDING,
vm_states.ACTIVE: inst_state.RUNNING,
vm_states.BUILDING: inst_state.PENDING,
vm_states.DELETED: inst_state.TERMINATED,
vm_states.SOFT_DELETED: inst_state.TERMINATED,
vm_states.STOPPED: inst_state.STOPPED,
vm_states.PAUSED: inst_state.PAUSE,
vm_states.SUSPENDED: inst_state.SUSPEND,
vm_states.RESCUED: inst_state.RESCUE,
vm_states.RESIZED: inst_state.RESIZE,
}
def _state_description(vm_state, _shutdown_terminate):
"""Map the vm state to the server status string."""
# Note(maoy): We do not provide EC2 compatibility
# in shutdown_terminate flag behavior. So we ignore
# it here.
name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name),
'name': name}
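# Following the table above, for example:
#     _state_description(vm_states.ACTIVE, False)  => {'code': 16, 'name': 'running'}
#     _state_description(None, False)              => {'code': 0, 'name': 'pending'}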
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
    BlockDeviceMapping.<N>.DeviceName
    BlockDeviceMapping.<N>.Ebs.SnapshotId
    BlockDeviceMapping.<N>.Ebs.VolumeSize
    BlockDeviceMapping.<N>.Ebs.DeleteOnTermination
    BlockDeviceMapping.<N>.Ebs.NoDevice
    BlockDeviceMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
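# A sketch of the flattening described in the docstring (field values are
# hypothetical):
#
#     _parse_block_device_mapping(
#         {'device_name': '/dev/sdb1',
#          'ebs': {'snapshot_id': 'snap-12345678', 'volume_size': 1}})
#     => {'device_name': '/dev/sdb1',
#         'snapshot_id': <the uuid looked up for snap-12345678>,
#         'volume_size': 1,
#         'delete_on_termination': True}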
def _properties_get_mappings(properties):
return block_device.mappings_prepend_dev(properties.get('mappings', []))
def _format_block_device_mapping(bdm):
"""Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if k in bdm:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
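# And roughly the reverse direction (values hypothetical):
#
#     _format_block_device_mapping(
#         {'device_name': '/dev/sdb1', 'snapshot_id': <uuid>,
#          'volume_size': 1, 'delete_on_termination': True})
#     => {'deviceName': '/dev/sdb1',
#         'ebs': {'snapshotId': 'snap-xxxxxxxx',
#                 'volumeSize': 1,
#                 'deleteOnTermination': True}}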
def _format_mappings(properties, result):
"""Format multiple BlockDeviceMappingItemType."""
mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
for m in _properties_get_mappings(properties)
if block_device.is_swap_or_ephemeral(m['virtual'])]
block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
properties.get('block_device_mapping', [])]
# NOTE(yamahata): overwrite mappings with block_device_mapping
for bdm in block_device_mapping:
for i in range(len(mappings)):
if bdm['deviceName'] == mappings[i]['deviceName']:
del mappings[i]
break
mappings.append(bdm)
# NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]
if mappings:
result['blockDeviceMapping'] = mappings
def db_to_inst_obj(context, db_instance):
# NOTE(danms): This is a temporary helper method for converting
# Instance DB objects to NovaObjects without needing to re-query.
inst_obj = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), db_instance,
expected_attrs=['system_metadata', 'metadata'])
return inst_obj
class CloudController(object):
"""CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.security_group_api = get_cloud_security_group_api()
self.compute_api = compute.API(network_api=self.network_api,
volume_api=self.volume_api,
security_group_api=self.security_group_api)
self.keypair_api = compute_api.KeypairAPI()
self.servicegroup_api = servicegroup.API()
def __str__(self):
return 'CloudController'
def _enforce_valid_instance_ids(self, context, instance_ids):
# NOTE(mikal): Amazon's implementation of the EC2 API requires that
# _all_ instance ids passed in be valid.
instances = {}
if instance_ids:
for ec2_id in instance_ids:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
instances[ec2_id] = instance
return instances
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
result = []
for zone in available_zones:
# Hide internal_service_availability_zone
if zone == CONF.internal_service_availability_zone:
continue
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
# Available services
enabled_services = db.service_get_all(context, False)
enabled_services = availability_zones.set_availability_zones(context,
enabled_services)
zone_hosts = {}
host_services = {}
for service in enabled_services:
zone_hosts.setdefault(service['availability_zone'], [])
if service['host'] not in zone_hosts[service['availability_zone']]:
zone_hosts[service['availability_zone']].append(
service['host'])
host_services.setdefault(service['availability_zone'] +
service['host'], [])
host_services[service['availability_zone'] + service['host']].\
append(service)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
'zoneState': "available"})
for host in zone_hosts[zone]:
result.append({'zoneName': '|- %s' % host,
'zoneState': ''})
for service in host_services[zone + host]:
alive = self.servicegroup_api.service_is_up(service)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if service['disabled']:
active = 'disabled'
result.append({'zoneName': '| |- %s' % service['binary'],
'zoneState': ('%s %s %s'
% (active, art,
service['updated_at']))})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def describe_regions(self, context, region_name=None, **kwargs):
if CONF.region_list:
regions = []
for region in CONF.region_list:
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme,
host,
CONF.ec2_port,
CONF.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
CONF.ec2_host,
CONF.ec2_port,
CONF.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
if snapshot_id:
snapshots = []
for ec2_id in snapshot_id:
internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id)
snapshot = self.volume_api.get_snapshot(
context,
snapshot_id=internal_id)
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
formatted_snapshots = []
for s in snapshots:
formatted = self._format_snapshot(context, s)
if formatted:
formatted_snapshots.append(formatted)
return {'snapshotSet': formatted_snapshots}
def _format_snapshot(self, context, snapshot):
# NOTE(mikal): this is just a set of strings in cinder. If they
# implement an enum, then we should move this code to use it. The
# valid ec2 statuses are "pending", "completed", and "error".
status_map = {'new': 'pending',
'creating': 'pending',
'available': 'completed',
'active': 'completed',
'deleting': 'pending',
'deleted': None,
'error': 'error'}
mapped_status = status_map.get(snapshot['status'], snapshot['status'])
if not mapped_status:
return None
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
s['status'] = mapped_status
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
s['volumeSize'] = snapshot['volume_size']
s['description'] = snapshot['display_description']
return s
def create_snapshot(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
LOG.audit(_("Create snapshot of volume %s"), volume_id,
context=context)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
args = (context, volume_id, kwargs.get('name'),
kwargs.get('description'))
if kwargs.get('force', False):
snapshot = self.volume_api.create_snapshot_force(*args)
else:
snapshot = self.volume_api.create_snapshot(*args)
db.ec2_snapshot_create(context, snapshot['id'])
return self._format_snapshot(context, snapshot)
def delete_snapshot(self, context, snapshot_id, **kwargs):
snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
self.volume_api.delete_snapshot(context, snapshot_id)
return True
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = self.keypair_api.get_key_pairs(context, context.user_id)
if key_name is not None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
        # If looking for a non-existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
raise exception.KeypairNotFound(msg,
code="InvalidKeyPair.Duplicate")
result = []
for key_pair in key_pairs:
# filter out the vpn keys
suffix = CONF.vpn_key_suffix
if context.is_admin or not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
})
return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
try:
keypair = self.keypair_api.create_key_pair(context,
context.user_id,
key_name)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise exception.EC2APIError(msg, code='ResourceLimitExceeded')
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint'],
'keyMaterial': keypair['private_key']}
# TODO(vish): when context is no longer an object, pass it here
def import_key_pair(self, context, key_name, public_key_material,
**kwargs):
LOG.audit(_("Import key %s"), key_name, context=context)
public_key = base64.b64decode(public_key_material)
try:
keypair = self.keypair_api.import_key_pair(context,
context.user_id,
key_name,
public_key)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise exception.EC2APIError(msg)
except exception.InvalidKeypair:
msg = _("Keypair data is invalid")
raise exception.EC2APIError(msg)
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint']}
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
self.keypair_api.delete_key_pair(context, context.user_id,
key_name)
except exception.NotFound:
# aws returns true even if the key doesn't exist
pass
return True
def describe_security_groups(self, context, group_name=None, group_id=None,
**kwargs):
search_opts = ec2utils.search_opts_from_filters(kwargs.get('filter'))
raw_groups = self.security_group_api.list(context,
group_name,
group_id,
context.project_id,
search_opts=search_opts)
groups = [self._format_security_group(context, g) for g in raw_groups]
return {'securityGroupInfo':
list(sorted(groups,
key=lambda k: (k['ownerId'], k['groupName'])))}
def _format_security_group(self, context, group):
g = {}
g['groupDescription'] = group['description']
g['groupName'] = group['name']
g['ownerId'] = group['project_id']
g['ipPermissions'] = []
for rule in group['rules']:
r = {}
r['groups'] = []
r['ipRanges'] = []
if rule['group_id']:
if rule.get('grantee_group'):
source_group = rule['grantee_group']
r['groups'] += [{'groupName': source_group['name'],
'userId': source_group['project_id']}]
else:
# rule is not always joined with grantee_group
# for example when using neutron driver.
source_group = self.security_group_api.get(
context, id=rule['group_id'])
r['groups'] += [{'groupName': source_group.get('name'),
'userId': source_group.get('project_id')}]
if rule['protocol']:
r['ipProtocol'] = rule['protocol'].lower()
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
g['ipPermissions'] += [dict(r)]
else:
for protocol, min_port, max_port in (('icmp', -1, -1),
('tcp', 1, 65535),
('udp', 1, 65535)):
r['ipProtocol'] = protocol
r['fromPort'] = min_port
r['toPort'] = max_port
g['ipPermissions'] += [dict(r)]
else:
r['ipProtocol'] = rule['protocol']
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
r['ipRanges'] += [{'cidrIp': rule['cidr']}]
g['ipPermissions'] += [r]
return g
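    # Illustrative output shape (values made up): a group with one TCP rule
    # open to 0.0.0.0/0 would be formatted roughly as
    #   {'groupName': 'default', 'ownerId': '<project>',
    #    'groupDescription': '...',
    #    'ipPermissions': [{'ipProtocol': 'tcp', 'fromPort': 22, 'toPort': 22,
    #                       'groups': [],
    #                       'ipRanges': [{'cidrIp': '0.0.0.0/0'}]}]}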
def _rule_args_to_dict(self, context, kwargs):
rules = []
if 'groups' not in kwargs and 'ip_ranges' not in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
return rules
if 'ip_ranges' in kwargs:
rules = self._cidr_args_split(kwargs)
else:
rules = [kwargs]
finalset = []
for rule in rules:
if 'groups' in rule:
groups_values = self._groups_args_split(rule)
for groups_value in groups_values:
final = self._rule_dict_last_step(context, **groups_value)
finalset.append(final)
else:
final = self._rule_dict_last_step(context, **rule)
finalset.append(final)
return finalset
def _cidr_args_split(self, kwargs):
cidr_args_split = []
cidrs = kwargs['ip_ranges']
for key, cidr in cidrs.iteritems():
mykwargs = kwargs.copy()
del mykwargs['ip_ranges']
mykwargs['cidr_ip'] = cidr['cidr_ip']
cidr_args_split.append(mykwargs)
return cidr_args_split
def _groups_args_split(self, kwargs):
groups_args_split = []
groups = kwargs['groups']
for key, group in groups.iteritems():
mykwargs = kwargs.copy()
del mykwargs['groups']
if 'group_name' in group:
mykwargs['source_security_group_name'] = group['group_name']
if 'user_id' in group:
mykwargs['source_security_group_owner_id'] = group['user_id']
if 'group_id' in group:
mykwargs['source_security_group_id'] = group['group_id']
groups_args_split.append(mykwargs)
return groups_args_split
def _rule_dict_last_step(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr_ip=None, user_id=None,
source_security_group_name=None,
source_security_group_owner_id=None):
if source_security_group_name:
source_project_id = self._get_source_project_id(context,
source_security_group_owner_id)
source_security_group = db.security_group_get_by_name(
context.elevated(),
source_project_id,
source_security_group_name)
notfound = exception.SecurityGroupNotFound
if not source_security_group:
raise notfound(security_group_id=source_security_group_name)
group_id = source_security_group['id']
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr_ip)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def _validate_group_identifier(self, group_name, group_id):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
def _validate_rulevalues(self, rulesvalues):
if not rulesvalues:
err = _("%s Not enough parameters to build a valid rule")
raise exception.EC2APIError(err % rulesvalues)
def _validate_security_group_protocol(self, values):
validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1']
if 'ip_protocol' in values and \
values['ip_protocol'] not in validprotocols:
err = _('Invalid IP protocol %s.') % values['ip_protocol']
raise exception.EC2APIError(message=err, code="400")
def revoke_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
prevalues = kwargs.get('ip_permissions', [kwargs])
rule_ids = []
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
rule_ids.append(self.security_group_api.rule_exists(
security_group, values_for_rule))
rule_ids = [id for id in rule_ids if id]
if rule_ids:
self.security_group_api.remove_rules(context, security_group,
rule_ids)
return True
raise exception.EC2APIError(_("No rule for the specified parameters."))
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
prevalues = kwargs.get('ip_permissions', [kwargs])
postvalues = []
for values in prevalues:
self._validate_security_group_protocol(values)
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
if self.security_group_api.rule_exists(security_group,
values_for_rule):
err = _('%s - This rule already exists in group')
raise exception.EC2APIError(err % values_for_rule)
postvalues.append(values_for_rule)
if postvalues:
self.security_group_api.add_rules(context, security_group['id'],
security_group['name'], postvalues)
return True
raise exception.EC2APIError(_("No rule for the specified parameters."))
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project_id
return source_project_id
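    # Illustrative parsing (values made up): an owner id of 'alice:proj1'
    # yields 'proj1', a bare 'alice' yields 'alice', and an empty value falls
    # back to the caller's own context.project_id.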
def create_security_group(self, context, group_name, group_description):
if isinstance(group_name, unicode):
group_name = group_name.encode('utf-8')
if CONF.ec2_strict_validation:
# EC2 specification gives constraints for name and description:
# Accepts alphanumeric characters, spaces, dashes, and underscores
allowed = '^[a-zA-Z0-9_\- ]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
self.security_group_api.validate_property(group_description,
'description', allowed)
else:
# Amazon accepts more symbols.
# So, allow POSIX [:print:] characters.
allowed = r'^[\x20-\x7E]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
security_group = self.security_group_api.get(context, group_name,
group_id)
self.security_group_api.destroy(context, security_group)
return True
def get_password_data(self, context, instance_id, **kwargs):
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = password.extract_password(instance)
# NOTE(vish): this should be timestamp from the metadata fields
# but it isn't important enough to implement properly
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"passwordData": output}
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = self.compute_api.get_console_output(context, instance)
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
validate_ec2_id(ec2_id)
internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
valid_ec2_api_volume_status_map = {
'attaching': 'in-use',
'detaching': 'in-use'}
instance_ec2_id = None
if volume.get('instance_uuid', None):
instance_uuid = volume['instance_uuid']
instance = db.instance_get_by_uuid(context.elevated(),
instance_uuid)
instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
v = {}
v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
v['status'] = valid_ec2_api_volume_status_map.get(volume['status'],
volume['status'])
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
if volume.get('snapshot_id') is not None:
v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
else:
v['snapshotId'] = None
return v
def create_volume(self, context, **kwargs):
snapshot_ec2id = kwargs.get('snapshot_id', None)
if snapshot_ec2id is not None:
snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id'])
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id,
context=context)
else:
snapshot = None
LOG.audit(_("Create volume of %s GB"),
kwargs.get('size'),
context=context)
create_kwargs = dict(snapshot=snapshot,
volume_type=kwargs.get('volume_type'),
metadata=kwargs.get('metadata'),
availability_zone=kwargs.get('availability_zone'))
volume = self.volume_api.create(context,
kwargs.get('size'),
kwargs.get('name'),
kwargs.get('description'),
**create_kwargs)
db.ec2_volume_create(context, volume['id'])
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
try:
self.volume_api.delete(context, volume_id)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Delete Failed'))
return True
def attach_volume(self, context,
volume_id,
instance_id,
device, **kwargs):
validate_ec2_id(instance_id)
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
LOG.audit(_('Attach volume %(volume_id)s to instance %(instance_id)s '
'at %(device)s'),
{'volume_id': volume_id,
'instance_id': instance_id,
'device': device},
context=context)
try:
self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Attach Failed.'))
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _get_instance_from_volume(self, context, volume):
if volume['instance_uuid']:
try:
return db.instance_get_by_uuid(context,
volume['instance_uuid'])
except exception.InstanceNotFound:
pass
raise exception.VolumeUnattached(volume_id=volume['id'])
def detach_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self._get_instance_from_volume(context, volume)
try:
self.compute_api.detach_volume(context, instance, volume)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Detach Volume Failed.'))
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(
volume['instance_uuid']),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _format_kernel_id(self, context, instance_ref, result, key):
kernel_uuid = instance_ref['kernel_id']
if kernel_uuid is None or kernel_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki')
def _format_ramdisk_id(self, context, instance_ref, result, key):
ramdisk_uuid = instance_ref['ramdisk_id']
if ramdisk_uuid is None or ramdisk_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid,
'ari')
def describe_instance_attribute(self, context, instance_id, attribute,
**kwargs):
def _unsupported_attribute(instance, result):
raise exception.EC2APIError(_('attribute not supported: %s') %
attribute)
def _format_attr_block_device_mapping(instance, result):
tmp = {}
self._format_instance_root_device_name(instance, tmp)
self._format_instance_bdm(context, instance['uuid'],
tmp['rootDeviceName'], result)
def _format_attr_disable_api_termination(instance, result):
result['disableApiTermination'] = instance['disable_terminate']
def _format_attr_group_set(instance, result):
CloudController._format_group_set(instance, result)
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
if instance['shutdown_terminate']:
result['instanceInitiatedShutdownBehavior'] = 'terminate'
else:
result['instanceInitiatedShutdownBehavior'] = 'stop'
def _format_attr_instance_type(instance, result):
self._format_instance_type(instance, result)
def _format_attr_kernel(instance, result):
self._format_kernel_id(context, instance, result, 'kernel')
def _format_attr_ramdisk(instance, result):
self._format_ramdisk_id(context, instance, result, 'ramdisk')
def _format_attr_root_device_name(instance, result):
self._format_instance_root_device_name(instance, result)
def _format_attr_source_dest_check(instance, result):
_unsupported_attribute(instance, result)
def _format_attr_user_data(instance, result):
result['userData'] = base64.b64decode(instance['user_data'])
attribute_formatter = {
'blockDeviceMapping': _format_attr_block_device_mapping,
'disableApiTermination': _format_attr_disable_api_termination,
'groupSet': _format_attr_group_set,
'instanceInitiatedShutdownBehavior':
_format_attr_instance_initiated_shutdown_behavior,
'instanceType': _format_attr_instance_type,
'kernel': _format_attr_kernel,
'ramdisk': _format_attr_ramdisk,
'rootDeviceName': _format_attr_root_device_name,
'sourceDestCheck': _format_attr_source_dest_check,
'userData': _format_attr_user_data,
}
fn = attribute_formatter.get(attribute)
if fn is None:
raise exception.EC2APIError(
_('attribute not supported: %s') % attribute)
validate_ec2_id(instance_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
result = {'instance_id': instance_id}
fn(instance, result)
return result
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
                                               instances_cache=instances,
filter=filters)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
                                               instances_cache=instances,
filter=filters,
use_v6=True)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
def _format_terminate_instances(self, context, instance_id,
previous_states):
instances_set = []
for (ec2_id, previous_state) in zip(instance_id, previous_states):
i = {}
i['instanceId'] = ec2_id
i['previousState'] = _state_description(previous_state['vm_state'],
previous_state['shutdown_terminate'])
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
i['currentState'] = _state_description(instance['vm_state'],
instance['shutdown_terminate'])
except exception.NotFound:
i['currentState'] = _state_description(vm_states.DELETED,
True)
instances_set.append(i)
return {'instancesSet': instances_set}
def _format_instance_bdm(self, context, instance_uuid, root_device_name,
result):
"""Format InstanceBlockDeviceMappingResponseItemType."""
root_device_type = 'instance-store'
mapping = []
for bdm in block_device.legacy_mapping(
db.block_device_mapping_get_all_by_instance(context,
instance_uuid)):
volume_id = bdm['volume_id']
if (volume_id is None or bdm['no_device']):
continue
if (bdm['device_name'] == root_device_name and
(bdm['snapshot_id'] or bdm['volume_id'])):
assert not bdm['virtual_name']
root_device_type = 'ebs'
vol = self.volume_api.get(context, volume_id)
LOG.debug(_("vol = %s\n"), vol)
# TODO(yamahata): volume attach time
ebs = {'volumeId': ec2utils.id_to_ec2_vol_id(volume_id),
'deleteOnTermination': bdm['delete_on_termination'],
'attachTime': vol['attach_time'] or '',
'status': vol['attach_status'], }
res = {'deviceName': bdm['device_name'],
'ebs': ebs, }
mapping.append(res)
if mapping:
result['blockDeviceMapping'] = mapping
result['rootDeviceType'] = root_device_type
@staticmethod
def _format_instance_root_device_name(instance, result):
result['rootDeviceName'] = (instance.get('root_device_name') or
block_device.DEFAULT_ROOT_DEV_NAME)
@staticmethod
def _format_instance_type(instance, result):
instance_type = flavors.extract_flavor(instance)
result['instanceType'] = instance_type['name']
@staticmethod
def _format_group_set(instance, result):
security_group_names = []
if instance.get('security_groups'):
for security_group in instance['security_groups']:
security_group_names.append(security_group['name'])
result['groupSet'] = utils.convert_to_list_dict(
security_group_names, 'groupId')
def _format_instances(self, context, instance_id=None, use_v6=False,
instances_cache=None, **search_opts):
# TODO(termie): this method is poorly named as its name does not imply
# that it will be making a variety of database calls
# rather than simply formatting a bunch of instances that
# were handed to it
reservations = {}
if not instances_cache:
instances_cache = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
instances = []
for ec2_id in instance_id:
if ec2_id in instances_cache:
instances.append(instances_cache[ec2_id])
else:
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
ec2_id)
instance = self.compute_api.get(context, instance_uuid)
except exception.NotFound:
continue
instances.append(instance)
else:
try:
# always filter out deleted instances
search_opts['deleted'] = False
instances = self.compute_api.get_all(context,
search_opts=search_opts,
sort_dir='asc')
except exception.NotFound:
instances = []
for instance in instances:
if not context.is_admin:
if pipelib.is_vpn_image(instance['image_ref']):
continue
i = {}
instance_uuid = instance['uuid']
ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
i['instanceId'] = ec2_id
image_uuid = instance['image_ref']
i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid)
self._format_kernel_id(context, instance, i, 'kernelId')
self._format_ramdisk_id(context, instance, i, 'ramdiskId')
i['instanceState'] = _state_description(
instance['vm_state'], instance['shutdown_terminate'])
fixed_ip = None
floating_ip = None
ip_info = ec2utils.get_ip_info_for_instance(context, instance)
if ip_info['fixed_ips']:
fixed_ip = ip_info['fixed_ips'][0]
if ip_info['floating_ips']:
floating_ip = ip_info['floating_ips'][0]
if ip_info['fixed_ip6s']:
i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
if CONF.ec2_private_dns_show_ip:
i['privateDnsName'] = fixed_ip
else:
i['privateDnsName'] = instance['hostname']
i['privateIpAddress'] = fixed_ip
i['publicDnsName'] = floating_ip
i['ipAddress'] = floating_ip or fixed_ip
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
i['tagSet'] = []
for k, v in utils.instance_meta(instance).iteritems():
i['tagSet'].append({'key': k, 'value': v})
if context.is_admin:
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
instance['host'])
i['productCodesSet'] = utils.convert_to_list_dict([],
'product_codes')
self._format_instance_type(instance, i)
i['launchTime'] = instance['created_at']
i['amiLaunchIndex'] = instance['launch_index']
self._format_instance_root_device_name(instance, i)
self._format_instance_bdm(context, instance['uuid'],
i['rootDeviceName'], i)
host = instance['host']
zone = ec2utils.get_availability_zone_by_host(host)
i['placement'] = {'availabilityZone': zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
self._format_group_set(instance, r)
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
reservations[instance['reservation_id']]['instancesSet'].append(i)
return list(reservations.values())
def describe_addresses(self, context, public_ip=None, **kwargs):
if public_ip:
floatings = []
for address in public_ip:
floating = self.network_api.get_floating_ip_by_address(context,
address)
floatings.append(floating)
else:
floatings = self.network_api.get_floating_ips_by_project(context)
addresses = [self._format_address(context, f) for f in floatings]
return {'addressesSet': addresses}
def _format_address(self, context, floating_ip):
ec2_id = None
if floating_ip['fixed_ip_id']:
fixed_id = floating_ip['fixed_ip_id']
fixed = self.network_api.get_fixed_ip(context, fixed_id)
if fixed['instance_uuid'] is not None:
ec2_id = ec2utils.id_to_ec2_inst_id(fixed['instance_uuid'])
address = {'public_ip': floating_ip['address'],
'instance_id': ec2_id}
if context.is_admin:
details = "%s (%s)" % (address['instance_id'],
floating_ip['project_id'])
address['instance_id'] = details
return address
def allocate_address(self, context, **kwargs):
LOG.audit(_("Allocate address"), context=context)
try:
public_ip = self.network_api.allocate_floating_ip(context)
except exception.FloatingIpLimitExceeded:
raise exception.EC2APIError(_('No more floating IPs available'))
return {'publicIp': public_ip}
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_('Release address %s'), public_ip, context=context)
try:
self.network_api.release_floating_ip(context, address=public_ip)
return {'return': "true"}
except exception.FloatingIpNotFound:
raise exception.EC2APIError(_('Unable to release IP Address.'))
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to instance "
"%(instance_id)s"),
{'public_ip': public_ip, 'instance_id': instance_id},
context=context)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance)
fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s']
if not fixed_ips:
msg = _('Unable to associate IP Address, no fixed_ips.')
raise exception.EC2APIError(msg)
# TODO(tr3buchet): this will associate the floating IP with the
# first fixed_ip an instance has. This should be
# changed to support specifying a particular fixed_ip if
# multiple exist but this may not apply to ec2..
if len(fixed_ips) > 1:
msg = _('multiple fixed_ips exist, using the first: %s')
LOG.warning(msg, fixed_ips[0])
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=public_ip,
fixed_address=fixed_ips[0])
return {'return': 'true'}
except exception.FloatingIpAssociated:
msg = _('Floating ip is already associated.')
raise exception.EC2APIError(msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed.')
raise exception.EC2APIError(msg)
except Exception:
msg = _('Error, unable to associate floating ip.')
LOG.exception(msg)
raise exception.EC2APIError(msg)
def disassociate_address(self, context, public_ip, **kwargs):
instance_id = self.network_api.get_instance_id_by_floating_address(
context, public_ip)
instance = self.compute_api.get(context, instance_id)
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
try:
self.network_api.disassociate_floating_ip(context, instance,
address=public_ip)
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated.')
raise exception.EC2APIError(msg)
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise exception.EC2APIError(msg)
return {'return': "true"}
def run_instances(self, context, **kwargs):
min_count = int(kwargs.get('min_count', 1))
client_token = kwargs.get('client_token')
if client_token:
resv_id = self._resv_id_from_token(context, client_token)
if resv_id:
# since this client_token already corresponds to a reservation
# id, this returns a proper response without creating a new
# instance
return self._format_run_instances(context, resv_id)
if kwargs.get('kernel_id'):
kernel = self._get_image(context, kwargs['kernel_id'])
kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
kernel['id'])
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context,
ramdisk['id'])
for bdm in kwargs.get('block_device_mapping', []):
_parse_block_device_mapping(bdm)
image = self._get_image(context, kwargs['image_id'])
image_uuid = ec2utils.id_to_glance_id(context, image['id'])
if image:
image_state = self._get_image_state(image)
else:
raise exception.ImageNotFoundEC2(image_id=kwargs['image_id'])
if image_state != 'available':
raise exception.EC2APIError(_('Image must be available'))
(instances, resv_id) = self.compute_api.create(context,
instance_type=flavors.get_flavor_by_name(
kwargs.get('instance_type', None)),
image_href=image_uuid,
max_count=int(kwargs.get('max_count', min_count)),
min_count=min_count,
kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'availability_zone'),
block_device_mapping=kwargs.get('block_device_mapping', {}))
instances = self._format_run_instances(context, resv_id)
if instances:
instance_ids = [i['instanceId'] for i in instances['instancesSet']]
self._add_client_token(context, client_token, instance_ids)
return instances
def _add_client_token(self, context, client_token, instance_ids):
"""Add client token to reservation ID mapping."""
if client_token:
self.create_tags(context, resource_id=instance_ids,
tag=[{'key': 'EC2_client_token',
'value': client_token}])
def _resv_id_from_token(self, context, client_token):
"""Get reservation ID from db."""
resv_id = None
tags = self.describe_tags(context, filter=[
{'name': 'key', 'value': 'EC2_client_token'},
])
tagSet = tags.get('tagSet')
# work around bug #1190845
for tags in tagSet:
if tags.get('value') == client_token:
instances = self._ec2_ids_to_instances(
context, [tags['resource_id']])
resv_id = instances[0]['reservation_id']
return resv_id
def _ec2_ids_to_instances(self, context, instance_id, objects=False):
"""Get all instances first, to prevent partial executions."""
instances = []
extra = ['system_metadata', 'metadata']
for ec2_id in instance_id:
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
if objects:
instance = instance_obj.Instance.get_by_uuid(
context, instance_uuid, expected_attrs=extra)
else:
instance = self.compute_api.get(context, instance_uuid)
instances.append(instance)
return instances
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified.
"""
previous_states = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to start terminating instances"))
for instance in previous_states:
self.compute_api.delete(context, instance)
return self._format_terminate_instances(context,
instance_id,
previous_states)
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids."""
instances = self._ec2_ids_to_instances(context, instance_id,
objects=True)
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for instance in instances:
self.compute_api.reboot(context, instance, 'HARD')
return True
def stop_instances(self, context, instance_id, **kwargs):
"""Stop each instances in instance_id.
Here instance_id is a list of instance ids
"""
instances = self._ec2_ids_to_instances(context, instance_id, True)
LOG.debug(_("Going to stop instances"))
for instance in instances:
self.compute_api.stop(context, instance)
return True
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
Here instance_id is a list of instance ids
"""
instances = self._ec2_ids_to_instances(context, instance_id, True)
LOG.debug(_("Going to start instances"))
for instance in instances:
self.compute_api.start(context, instance)
return True
def _get_image(self, context, ec2_id):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
image = self.image_service.show(context, internal_id)
except (exception.InvalidEc2Id, exception.ImageNotFound):
filters = {'name': ec2_id}
images = self.image_service.detail(context, filters=filters)
try:
return images[0]
except IndexError:
raise exception.ImageNotFound(image_id=ec2_id)
image_type = ec2_id.split('-')[0]
if ec2utils.image_type(image.get('container_format')) != image_type:
raise exception.ImageNotFound(image_id=ec2_id)
return image
def _format_image(self, image):
"""Convert from format defined by GlanceImageService to S3 format."""
i = {}
image_type = ec2utils.image_type(image.get('container_format'))
ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type)
name = image.get('name')
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari')
i['imageOwnerId'] = image.get('owner')
img_loc = image['properties'].get('image_location')
if img_loc:
i['imageLocation'] = img_loc
else:
i['imageLocation'] = "%s (%s)" % (img_loc, name)
i['name'] = name
if not name and img_loc:
# This should only occur for images registered with ec2 api
# prior to that api populating the glance name
i['name'] = img_loc
i['imageState'] = self._get_image_state(image)
i['description'] = image.get('description')
display_mapping = {'aki': 'kernel',
'ari': 'ramdisk',
'ami': 'machine'}
i['imageType'] = display_mapping.get(image_type)
i['isPublic'] = not not image.get('is_public')
i['architecture'] = image['properties'].get('architecture')
properties = image['properties']
root_device_name = block_device.properties_root_device_name(properties)
root_device_type = 'instance-store'
for bdm in properties.get('block_device_mapping', []):
if (block_device.strip_dev(bdm.get('device_name')) ==
block_device.strip_dev(root_device_name) and
('snapshot_id' in bdm or 'volume_id' in bdm) and
not bdm.get('no_device')):
root_device_type = 'ebs'
i['rootDeviceName'] = (root_device_name or
block_device.DEFAULT_ROOT_DEV_NAME)
i['rootDeviceType'] = root_device_type
_format_mappings(properties, i)
return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
if image_id:
images = []
for ec2_id in image_id:
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
image = self._get_image(context, image_id)
internal_id = image['id']
self.image_service.delete(context, internal_id)
return True
def _register_image(self, context, metadata):
image = self.image_service.create(context, metadata)
image_type = ec2utils.image_type(image.get('container_format'))
image_id = ec2utils.image_ec2_id(image['id'], image_type)
return image_id
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and kwargs.get('name'):
image_location = kwargs['name']
if image_location is None:
raise exception.EC2APIError(_('imageLocation is required'))
metadata = {'properties': {'image_location': image_location}}
if kwargs.get('name'):
metadata['name'] = kwargs['name']
else:
metadata['name'] = image_location
if 'root_device_name' in kwargs:
metadata['properties']['root_device_name'] = kwargs.get(
'root_device_name')
mappings = [_parse_block_device_mapping(bdm) for bdm in
kwargs.get('block_device_mapping', [])]
if mappings:
metadata['properties']['block_device_mapping'] = mappings
image_id = self._register_image(context, metadata)
LOG.audit(_('Registered image %(image_location)s with id '
'%(image_id)s'),
{'image_location': image_location, 'image_id': image_id},
context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
def _block_device_mapping_attribute(image, result):
_format_mappings(image['properties'], result)
def _launch_permission_attribute(image, result):
result['launchPermission'] = []
if image['is_public']:
result['launchPermission'].append({'group': 'all'})
def _root_device_name_attribute(image, result):
_prop_root_dev_name = block_device.properties_root_device_name
result['rootDeviceName'] = _prop_root_dev_name(image['properties'])
if result['rootDeviceName'] is None:
result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME
def _kernel_attribute(image, result):
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
result['kernel'] = {
'value': ec2utils.image_ec2_id(kernel_id, 'aki')
}
def _ramdisk_attribute(image, result):
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
result['ramdisk'] = {
'value': ec2utils.image_ec2_id(ramdisk_id, 'ari')
}
supported_attributes = {
'blockDeviceMapping': _block_device_mapping_attribute,
'launchPermission': _launch_permission_attribute,
'rootDeviceName': _root_device_name_attribute,
'kernel': _kernel_attribute,
'ramdisk': _ramdisk_attribute,
}
fn = supported_attributes.get(attribute)
if fn is None:
raise exception.EC2APIError(_('attribute not supported: %s')
% attribute)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
result = {'imageId': image_id}
fn(image, result)
return result
def modify_image_attribute(self, context, image_id, attribute,
operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.EC2APIError(_('attribute not supported: %s')
% attribute)
if 'user_group' not in kwargs:
raise exception.EC2APIError(_('user or group not specified'))
        if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
raise exception.EC2APIError(_('only group "all" is supported'))
if operation_type not in ['add', 'remove']:
msg = _('operation_type must be add or remove')
raise exception.EC2APIError(msg)
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
internal_id = image['id']
del(image['id'])
image['is_public'] = (operation_type == 'add')
try:
return self.image_service.update(context, internal_id, image)
except exception.ImageNotAuthorized:
msg = _('Not allowed to modify attributes for image %s')
raise exception.EC2APIError(msg % image_id)
def update_image(self, context, image_id, **kwargs):
internal_id = ec2utils.ec2_id_to_id(image_id)
result = self.image_service.update(context, internal_id, dict(kwargs))
return result
# TODO(yamahata): race condition
# At the moment there is no way to prevent others from
# manipulating instances/volumes/snapshots.
# As other code doesn't take it into consideration, here we don't
    # care about it for now. Ostrich algorithm
def create_image(self, context, instance_id, **kwargs):
# NOTE(yamahata): name/description are ignored by register_image(),
# do so here
no_reboot = kwargs.get('no_reboot', False)
name = kwargs.get('name')
validate_ec2_id(instance_id)
ec2_instance_id = instance_id
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
bdms = self.compute_api.get_instance_bdms(context, instance)
# CreateImage only supported for the analogue of EBS-backed instances
if not self.compute_api.is_volume_backed_instance(context, instance,
bdms):
msg = _("Invalid value '%(ec2_instance_id)s' for instanceId. "
"Instance does not have a volume attached at root "
"(%(root)s)") % {'root': instance['root_device_name'],
'ec2_instance_id': ec2_instance_id}
raise exception.InvalidParameterValue(err=msg)
# stop the instance if necessary
restart_instance = False
if not no_reboot:
vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
if vm_state == vm_states.ACTIVE:
restart_instance = True
self.compute_api.stop(context, instance)
# wait instance for really stopped
start_time = time.time()
while vm_state != vm_states.STOPPED:
time.sleep(1)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
timeout = 1 * 60 * 60
if time.time() > start_time + timeout:
raise exception.EC2APIError(
                            _('Couldn\'t stop instance within %d sec') % timeout)
glance_uuid = instance['image_ref']
ec2_image_id = ec2utils.glance_id_to_ec2_id(context, glance_uuid)
src_image = self._get_image(context, ec2_image_id)
image_meta = dict(src_image)
def _unmap_id_property(properties, name):
if properties[name]:
properties[name] = ec2utils.id_to_glance_id(context,
properties[name])
# ensure the ID properties are unmapped back to the glance UUID
_unmap_id_property(image_meta['properties'], 'kernel_id')
_unmap_id_property(image_meta['properties'], 'ramdisk_id')
# meaningful image name
name_map = dict(instance=instance['uuid'], now=timeutils.isotime())
name = name or _('image of %(instance)s at %(now)s') % name_map
new_image = self.compute_api.snapshot_volume_backed(context,
instance,
image_meta,
name)
ec2_id = ec2utils.glance_id_to_ec2_id(context, new_image['id'])
if restart_instance:
self.compute_api.start(context, instance)
return {'imageId': ec2_id}
def create_tags(self, context, **kwargs):
"""Add tags to a resource
Returns True on success, error on failure.
:param context: context under which the method is called
"""
resources = kwargs.get('resource_id', None)
tags = kwargs.get('tag', None)
if resources is None or tags is None:
raise exception.EC2APIError(_('resource_id and tag are required'))
if not isinstance(resources, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of resources'))
for r in resources:
if ec2utils.resource_type_from_id(context, r) != 'instance':
raise exception.EC2APIError(_('Only instances implemented'))
if not isinstance(tags, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of tagSets'))
metadata = {}
for tag in tags:
if not isinstance(tag, dict):
raise exception.EC2APIError(_
('Expecting tagSet to be key/value pairs'))
key = tag.get('key', None)
val = tag.get('value', None)
if key is None or val is None:
raise exception.EC2APIError(_
('Expecting both key and value to be set'))
metadata[key] = val
for ec2_id in resources:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
self.compute_api.update_instance_metadata(context,
instance, metadata)
return True
def delete_tags(self, context, **kwargs):
"""Delete tags
Returns True on success, error on failure.
:param context: context under which the method is called
"""
resources = kwargs.get('resource_id', None)
tags = kwargs.get('tag', None)
if resources is None or tags is None:
raise exception.EC2APIError(_('resource_id and tag are required'))
if not isinstance(resources, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of resources'))
for r in resources:
if ec2utils.resource_type_from_id(context, r) != 'instance':
raise exception.EC2APIError(_('Only instances implemented'))
if not isinstance(tags, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of tagSets'))
for ec2_id in resources:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
for tag in tags:
if not isinstance(tag, dict):
raise exception.EC2APIError(_
('Expecting tagSet to be key/value pairs'))
key = tag.get('key', None)
if key is None:
raise exception.EC2APIError(_('Expecting key to be set'))
self.compute_api.delete_instance_metadata(context,
instance, key)
return True
def describe_tags(self, context, **kwargs):
"""List tags
Returns a dict with a single key 'tagSet' on success, error on failure.
:param context: context under which the method is called
"""
filters = kwargs.get('filter', None)
search_filts = []
if filters:
for filter_block in filters:
key_name = filter_block.get('name', None)
val = filter_block.get('value', None)
if val:
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
if key_name:
search_block = {}
if key_name in ('resource_id', 'resource-id'):
search_block['resource_id'] = []
for res_id in val:
search_block['resource_id'].append(
ec2utils.ec2_inst_id_to_uuid(context, res_id))
elif key_name in ['key', 'value']:
search_block[key_name] = val
elif key_name in ('resource_type', 'resource-type'):
for res_type in val:
if res_type != 'instance':
raise exception.EC2APIError(_
('Only instances implemented'))
search_block[key_name] = 'instance'
if len(search_block.keys()) > 0:
search_filts.append(search_block)
ts = []
for tag in self.compute_api.get_all_instance_metadata(context,
search_filts):
ts.append({
'resource_id': ec2utils.id_to_ec2_inst_id(tag['instance_id']),
'resource_type': 'instance',
'key': tag['key'],
'value': tag['value']
})
return {"tagSet": ts}
class EC2SecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exception.InvalidParameterValue(err=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.EC2APIError(message=msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.InvalidGroup(reason=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
if decoding_exception:
raise decoding_exception
else:
raise exception.EC2APIError(_("Invalid CIDR"))
@staticmethod
def raise_over_quota(msg):
raise exception.EC2APIError(message=msg)
@staticmethod
def raise_not_found(msg):
pass
class CloudSecurityGroupNovaAPI(EC2SecurityGroupExceptions,
compute_api.SecurityGroupAPI):
pass
class CloudSecurityGroupNeutronAPI(EC2SecurityGroupExceptions,
neutron_driver.SecurityGroupAPI):
pass
def get_cloud_security_group_api():
if cfg.CONF.security_group_api.lower() == 'nova':
return CloudSecurityGroupNovaAPI()
elif cfg.CONF.security_group_api.lower() in ('neutron', 'quantum'):
return CloudSecurityGroupNeutronAPI()
else:
raise NotImplementedError()
| apache-2.0 | 6,961,433,997,333,561,000 | 41.960417 | 79 | 0.54784 | false |
insequent/kargo | contrib/terraform/terraform.py | 1 | 26747 | #!/usr/bin/env python2
#
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# original: https://github.com/CiscoCloud/terraform.py
"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
from functools import wraps
import json
import os
import re
VERSION = '0.3.0pre'
def tfstates(root=None):
root = root or os.getcwd()
for dirpath, _, filenames in os.walk(root):
for name in filenames:
if os.path.splitext(name)[-1] == '.tfstate':
yield os.path.join(dirpath, name)
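# Illustrative usage (paths are made up): with ./a/terraform.tfstate and
# ./b/prod.tfstate below the working directory, list(tfstates()) would yield
# both paths; they are then fed to iterresources() below.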
def iterresources(filenames):
for filename in filenames:
with open(filename, 'r') as json_file:
state = json.load(json_file)
for module in state['modules']:
name = module['path'][-1]
for key, resource in module['resources'].items():
yield name, key, resource
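# Illustrative yield (names are made up): for a root-module resource keyed
# 'openstack_compute_instance_v2.node.0' this generator produces a tuple like
# ('root', 'openstack_compute_instance_v2.node.0', {...resource body...}),
# i.e. (module name, resource key, resource dict).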
## READ RESOURCES
PARSERS = {}
def _clean_dc(dcname):
# Consul DCs are strictly alphanumeric with underscores and hyphens -
# ensure that the consul_dc attribute meets these requirements.
return re.sub('[^\w_\-]', '-', dcname)
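# Illustrative example (value made up): _clean_dc('us east 1!') returns
# 'us-east-1-', since every character outside [A-Za-z0-9_-] is replaced.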
def iterhosts(resources):
'''yield host tuples of (name, attributes, groups)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
try:
parser = PARSERS[resource_type]
except KeyError:
continue
yield parser(resource, module_name)
def iterips(resources):
'''yield ip tuples of (instance_id, ip)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
if resource_type == 'openstack_compute_floatingip_associate_v2':
yield openstack_floating_ips(resource)
def parses(prefix):
def inner(func):
PARSERS[prefix] = func
return func
return inner
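# Illustrative registration (the resource type is an example, not an addition
# to the real parser table): decorating a function with @parses('aws_instance')
# simply stores it in PARSERS['aws_instance'], so iterhosts() can dispatch on
# the prefix of each resource key ('aws_instance.web' -> 'aws_instance').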
def calculate_mantl_vars(func):
"""calculate Mantl vars"""
@wraps(func)
def inner(*args, **kwargs):
name, attrs, groups = func(*args, **kwargs)
# attrs
if attrs.get('role', '') == 'control':
attrs['consul_is_server'] = True
else:
attrs['consul_is_server'] = False
# groups
if attrs.get('publicly_routable', False):
groups.append('publicly_routable')
return name, attrs, groups
return inner
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
continue
if curprefix != prefix or rest == '#':
continue
yield rest, value
def parse_attr_list(source, prefix, sep='.'):
attrs = defaultdict(dict)
for compkey, value in _parse_prefix(source, prefix, sep):
idx, key = compkey.split(sep, 1)
attrs[idx][key] = value
return attrs.values()
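# Illustrative example (hypothetical raw attribute keys):
#   parse_attr_list({'network.0.name': 'net0', 'network.0.ip': '10.0.0.5'}, 'network')
#   returns [{'name': 'net0', 'ip': '10.0.0.5'}]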
def parse_dict(source, prefix, sep='.'):
return dict(_parse_prefix(source, prefix, sep))
def parse_list(source, prefix, sep='.'):
return [value for _, value in _parse_prefix(source, prefix, sep)]
def parse_bool(string_form):
token = string_form.lower()[0]
if token == 't':
return True
elif token == 'f':
return False
else:
raise ValueError('could not convert %r to a bool' % string_form)
@parses('triton_machine')
@calculate_mantl_vars
def triton_machine(resource, module_name):
raw_attrs = resource['primary']['attributes']
name = raw_attrs.get('name')
groups = []
attrs = {
'id': raw_attrs['id'],
'dataset': raw_attrs['dataset'],
'disk': raw_attrs['disk'],
'firewall_enabled': parse_bool(raw_attrs['firewall_enabled']),
'image': raw_attrs['image'],
'ips': parse_list(raw_attrs, 'ips'),
'memory': raw_attrs['memory'],
'name': raw_attrs['name'],
'networks': parse_list(raw_attrs, 'networks'),
'package': raw_attrs['package'],
'primary_ip': raw_attrs['primaryip'],
'root_authorized_keys': raw_attrs['root_authorized_keys'],
'state': raw_attrs['state'],
'tags': parse_dict(raw_attrs, 'tags'),
'type': raw_attrs['type'],
'user_data': raw_attrs['user_data'],
'user_script': raw_attrs['user_script'],
# ansible
'ansible_ssh_host': raw_attrs['primaryip'],
'ansible_ssh_port': 22,
'ansible_ssh_user': 'root', # it's "root" on Triton by default
# generic
'public_ipv4': raw_attrs['primaryip'],
'provider': 'triton',
}
# private IPv4
for ip in attrs['ips']:
if ip.startswith('10') or ip.startswith('192.168'): # private IPs
attrs['private_ipv4'] = ip
break
if 'private_ipv4' not in attrs:
attrs['private_ipv4'] = attrs['public_ipv4']
# attrs specific to Mantl
attrs.update({
'consul_dc': _clean_dc(attrs['tags'].get('dc', 'none')),
'role': attrs['tags'].get('role', 'none'),
'ansible_python_interpreter': attrs['tags'].get('python_bin', 'python')
})
# add groups based on attrs
groups.append('triton_image=' + attrs['image'])
groups.append('triton_package=' + attrs['package'])
groups.append('triton_state=' + attrs['state'])
groups.append('triton_firewall_enabled=%s' % attrs['firewall_enabled'])
groups.extend('triton_tags_%s=%s' % item
for item in attrs['tags'].items())
groups.extend('triton_network=' + network
for network in attrs['networks'])
# groups specific to Mantl
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('digitalocean_droplet')
@calculate_mantl_vars
def digitalocean_host(resource, tfvars=None):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'id': raw_attrs['id'],
'image': raw_attrs['image'],
'ipv4_address': raw_attrs['ipv4_address'],
'locked': parse_bool(raw_attrs['locked']),
'metadata': json.loads(raw_attrs.get('user_data', '{}')),
'region': raw_attrs['region'],
'size': raw_attrs['size'],
'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
'status': raw_attrs['status'],
# ansible
'ansible_ssh_host': raw_attrs['ipv4_address'],
'ansible_ssh_port': 22,
'ansible_ssh_user': 'root', # it's always "root" on DO
# generic
'public_ipv4': raw_attrs['ipv4_address'],
'private_ipv4': raw_attrs.get('ipv4_address_private',
raw_attrs['ipv4_address']),
'provider': 'digitalocean',
}
# attrs specific to Mantl
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
'role': attrs['metadata'].get('role', 'none'),
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
})
# add groups based on attrs
groups.append('do_image=' + attrs['image'])
groups.append('do_locked=%s' % attrs['locked'])
groups.append('do_region=' + attrs['region'])
groups.append('do_size=' + attrs['size'])
groups.append('do_status=' + attrs['status'])
groups.extend('do_metadata_%s=%s' % item
for item in attrs['metadata'].items())
# groups specific to Mantl
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('softlayer_virtualserver')
@calculate_mantl_vars
def softlayer_host(resource, module_name):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'id': raw_attrs['id'],
'image': raw_attrs['image'],
'ipv4_address': raw_attrs['ipv4_address'],
'metadata': json.loads(raw_attrs.get('user_data', '{}')),
'region': raw_attrs['region'],
'ram': raw_attrs['ram'],
'cpu': raw_attrs['cpu'],
'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
'public_ipv4': raw_attrs['ipv4_address'],
'private_ipv4': raw_attrs['ipv4_address_private'],
'ansible_ssh_host': raw_attrs['ipv4_address'],
'ansible_ssh_port': 22,
'ansible_ssh_user': 'root',
'provider': 'softlayer',
}
# attrs specific to Mantl
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
'role': attrs['metadata'].get('role', 'none'),
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
})
# groups specific to Mantl
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
def openstack_floating_ips(resource):
    # Return an (instance_id, floating_ip) tuple; iterips() collects these
    # into the dict used by iter_host_ips() to patch host addresses.
    raw_attrs = resource['primary']['attributes']
    return raw_attrs['instance_id'], raw_attrs['floating_ip']
@parses('openstack_compute_instance_v2')
@calculate_mantl_vars
def openstack_host(resource, module_name):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'access_ip_v4': raw_attrs['access_ip_v4'],
'access_ip_v6': raw_attrs['access_ip_v6'],
'access_ip': raw_attrs['access_ip_v4'],
'ip': raw_attrs['network.0.fixed_ip_v4'],
'flavor': parse_dict(raw_attrs, 'flavor',
sep='_'),
'id': raw_attrs['id'],
'image': parse_dict(raw_attrs, 'image',
sep='_'),
'key_pair': raw_attrs['key_pair'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'region': raw_attrs.get('region', ''),
'security_groups': parse_list(raw_attrs, 'security_groups'),
# ansible
'ansible_ssh_port': 22,
# workaround for an OpenStack bug where hosts have a different domain
# after they're restarted
'host_domain': 'novalocal',
'use_host_domain': True,
# generic
'public_ipv4': raw_attrs['access_ip_v4'],
'private_ipv4': raw_attrs['access_ip_v4'],
'provider': 'openstack',
}
if 'floating_ip' in raw_attrs:
attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
try:
attrs.update({
'ansible_ssh_host': raw_attrs['access_ip_v4'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
# attrs specific to Ansible
if 'metadata.ssh_user' in raw_attrs:
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
device_index = 1
for key, value in raw_attrs.items():
match = re.search("^volume.*.device$", key)
if match:
attrs['disk_volume_device_'+str(device_index)] = value
device_index += 1
# attrs specific to Mantl
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
})
# add groups based on attrs
groups.append('os_image=' + attrs['image']['name'])
groups.append('os_flavor=' + attrs['flavor']['name'])
groups.extend('os_metadata_%s=%s' % item
for item in attrs['metadata'].items())
groups.append('os_region=' + attrs['region'])
# groups specific to Mantl
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
# groups specific to kubespray
for group in attrs['metadata'].get('kubespray_groups', "").split(","):
groups.append(group)
return name, attrs, groups
@parses('aws_instance')
@calculate_mantl_vars
def aws_host(resource, module_name):
name = resource['primary']['attributes']['tags.Name']
raw_attrs = resource['primary']['attributes']
groups = []
attrs = {
'ami': raw_attrs['ami'],
'availability_zone': raw_attrs['availability_zone'],
'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
'ephemeral_block_device': parse_attr_list(raw_attrs,
'ephemeral_block_device'),
'id': raw_attrs['id'],
'key_name': raw_attrs['key_name'],
'private': parse_dict(raw_attrs, 'private',
sep='_'),
'public': parse_dict(raw_attrs, 'public',
sep='_'),
'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
'security_groups': parse_list(raw_attrs, 'security_groups'),
'subnet': parse_dict(raw_attrs, 'subnet',
sep='_'),
'tags': parse_dict(raw_attrs, 'tags'),
'tenancy': raw_attrs['tenancy'],
'vpc_security_group_ids': parse_list(raw_attrs,
'vpc_security_group_ids'),
# ansible-specific
'ansible_ssh_port': 22,
'ansible_ssh_host': raw_attrs['public_ip'],
# generic
'public_ipv4': raw_attrs['public_ip'],
'private_ipv4': raw_attrs['private_ip'],
'provider': 'aws',
}
# attrs specific to Ansible
if 'tags.sshUser' in raw_attrs:
attrs['ansible_ssh_user'] = raw_attrs['tags.sshUser']
if 'tags.sshPrivateIp' in raw_attrs:
attrs['ansible_ssh_host'] = raw_attrs['private_ip']
# attrs specific to Mantl
attrs.update({
'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)),
'role': attrs['tags'].get('role', 'none'),
'ansible_python_interpreter': attrs['tags'].get('python_bin','python')
})
# groups specific to Mantl
groups.extend(['aws_ami=' + attrs['ami'],
'aws_az=' + attrs['availability_zone'],
'aws_key_name=' + attrs['key_name'],
'aws_tenancy=' + attrs['tenancy']])
groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
groups.extend('aws_vpc_security_group=' + group
for group in attrs['vpc_security_group_ids'])
groups.extend('aws_subnet_%s=%s' % subnet
for subnet in attrs['subnet'].items())
# groups specific to Mantl
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('google_compute_instance')
@calculate_mantl_vars
def gce_host(resource, module_name):
name = resource['primary']['id']
raw_attrs = resource['primary']['attributes']
groups = []
# network interfaces
interfaces = parse_attr_list(raw_attrs, 'network_interface')
for interface in interfaces:
interface['access_config'] = parse_attr_list(interface,
'access_config')
for key in interface.keys():
if '.' in key:
del interface[key]
# general attrs
attrs = {
'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
'disks': parse_attr_list(raw_attrs, 'disk'),
'machine_type': raw_attrs['machine_type'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'network_interface': interfaces,
'self_link': raw_attrs['self_link'],
'service_account': parse_attr_list(raw_attrs, 'service_account'),
'tags': parse_list(raw_attrs, 'tags'),
'zone': raw_attrs['zone'],
# ansible
'ansible_ssh_port': 22,
'provider': 'gce',
}
# attrs specific to Ansible
if 'metadata.ssh_user' in raw_attrs:
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
# attrs specific to Mantl
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
})
try:
attrs.update({
'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
'public_ipv4': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
'private_ipv4': interfaces[0]['address'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# add groups based on attrs
groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
groups.append('gce_machine_type=' + attrs['machine_type'])
groups.extend('gce_metadata_%s=%s' % (key, value)
for (key, value) in attrs['metadata'].items()
if key not in set(['sshKeys']))
groups.extend('gce_tag=' + tag for tag in attrs['tags'])
groups.append('gce_zone=' + attrs['zone'])
if attrs['can_ip_forward']:
groups.append('gce_ip_forward')
if attrs['publicly_routable']:
groups.append('gce_publicly_routable')
# groups specific to Mantl
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('vsphere_virtual_machine')
@calculate_mantl_vars
def vsphere_host(resource, module_name):
raw_attrs = resource['primary']['attributes']
network_attrs = parse_dict(raw_attrs, 'network_interface')
network = parse_dict(network_attrs, '0')
ip_address = network.get('ipv4_address', network['ip_address'])
name = raw_attrs['name']
groups = []
attrs = {
'id': raw_attrs['id'],
'ip_address': ip_address,
'private_ipv4': ip_address,
'public_ipv4': ip_address,
'metadata': parse_dict(raw_attrs, 'custom_configuration_parameters'),
'ansible_ssh_port': 22,
'provider': 'vsphere',
}
try:
attrs.update({
'ansible_ssh_host': ip_address,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', })
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('consul_dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
})
# attrs specific to Ansible
if 'ssh_user' in attrs['metadata']:
attrs['ansible_ssh_user'] = attrs['metadata']['ssh_user']
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('azure_instance')
@calculate_mantl_vars
def azure_host(resource, module_name):
name = resource['primary']['attributes']['name']
raw_attrs = resource['primary']['attributes']
groups = []
attrs = {
'automatic_updates': raw_attrs['automatic_updates'],
'description': raw_attrs['description'],
'hosted_service_name': raw_attrs['hosted_service_name'],
'id': raw_attrs['id'],
'image': raw_attrs['image'],
'ip_address': raw_attrs['ip_address'],
'location': raw_attrs['location'],
'name': raw_attrs['name'],
'reverse_dns': raw_attrs['reverse_dns'],
'security_group': raw_attrs['security_group'],
'size': raw_attrs['size'],
'ssh_key_thumbprint': raw_attrs['ssh_key_thumbprint'],
'subnet': raw_attrs['subnet'],
'username': raw_attrs['username'],
'vip_address': raw_attrs['vip_address'],
'virtual_network': raw_attrs['virtual_network'],
'endpoint': parse_attr_list(raw_attrs, 'endpoint'),
# ansible
'ansible_ssh_port': 22,
'ansible_ssh_user': raw_attrs['username'],
'ansible_ssh_host': raw_attrs['vip_address'],
}
# attrs specific to mantl
attrs.update({
'consul_dc': attrs['location'].lower().replace(" ", "-"),
'role': attrs['description']
})
# groups specific to mantl
groups.extend(['azure_image=' + attrs['image'],
'azure_location=' + attrs['location'].lower().replace(" ", "-"),
'azure_username=' + attrs['username'],
'azure_security_group=' + attrs['security_group']])
# groups specific to mantl
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('clc_server')
@calculate_mantl_vars
def clc_server(resource, module_name):
raw_attrs = resource['primary']['attributes']
name = raw_attrs.get('id')
groups = []
md = parse_dict(raw_attrs, 'metadata')
attrs = {
'metadata': md,
'ansible_ssh_port': md.get('ssh_port', 22),
'ansible_ssh_user': md.get('ssh_user', 'root'),
'provider': 'clc',
'publicly_routable': False,
}
try:
attrs.update({
'public_ipv4': raw_attrs['public_ip_address'],
'private_ipv4': raw_attrs['private_ip_address'],
'ansible_ssh_host': raw_attrs['public_ip_address'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({
'ansible_ssh_host': raw_attrs['private_ip_address'],
'private_ipv4': raw_attrs['private_ip_address'],
})
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
})
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
def iter_host_ips(hosts, ips):
'''Update hosts that have an entry in the floating IP list'''
for host in hosts:
host_id = host[1]['id']
if host_id in ips:
ip = ips[host_id]
host[1].update({
'access_ip_v4': ip,
'access_ip': ip,
'public_ipv4': ip,
'ansible_ssh_host': ip,
})
yield host
## QUERY TYPES
def query_host(hosts, target):
for name, attrs, _ in hosts:
if name == target:
return attrs
return {}
def query_list(hosts):
groups = defaultdict(dict)
meta = {}
for name, attrs, hostgroups in hosts:
for group in set(hostgroups):
# Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf
# Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all"
            if not group:
                group = "all"
groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
meta[name] = attrs
groups['_meta'] = {'hostvars': meta}
return groups
def query_hostfile(hosts):
out = ['## begin hosts generated by terraform.py ##']
out.extend(
'{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name)
for name, attrs, _ in hosts
)
out.append('## end hosts generated by terraform.py ##')
return '\n'.join(out)
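# Example output (addresses and names are illustrative):
#   ## begin hosts generated by terraform.py ##
#   10.0.0.5        	node-1
#   ## end hosts generated by terraform.py ##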
def main():
parser = argparse.ArgumentParser(
__file__, __doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
modes = parser.add_mutually_exclusive_group(required=True)
modes.add_argument('--list',
action='store_true',
help='list all variables')
modes.add_argument('--host', help='list variables for a single host')
modes.add_argument('--version',
action='store_true',
help='print version and exit')
modes.add_argument('--hostfile',
action='store_true',
help='print hosts as a /etc/hosts snippet')
parser.add_argument('--pretty',
action='store_true',
help='pretty-print output JSON')
parser.add_argument('--nometa',
action='store_true',
help='with --list, exclude hostvars')
default_root = os.environ.get('TERRAFORM_STATE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', )))
parser.add_argument('--root',
default=default_root,
help='custom root to search for `.tfstate`s in')
args = parser.parse_args()
if args.version:
print('%s %s' % (__file__, VERSION))
parser.exit()
hosts = iterhosts(iterresources(tfstates(args.root)))
    # Perform a second pass over the state files to pick up floating IP
    # associations and update the addresses of the referenced hosts
ips = dict(iterips(iterresources(tfstates(args.root))))
if ips:
hosts = iter_host_ips(hosts, ips)
if args.list:
output = query_list(hosts)
if args.nometa:
del output['_meta']
print(json.dumps(output, indent=4 if args.pretty else None))
elif args.host:
output = query_host(hosts, args.host)
print(json.dumps(output, indent=4 if args.pretty else None))
elif args.hostfile:
output = query_hostfile(hosts)
print(output)
parser.exit()
if __name__ == '__main__':
main()
| apache-2.0 | 199,135,797,092,256,700 | 32.728878 | 160 | 0.57584 | false |
NetApp/cinder | cinder/volume/drivers/netapp/dataontap/fc_7mode.py | 4 | 5533 | # Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (7-mode) FibreChannel storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetApp7modeFibreChannelDriver(driver.BaseVD,
driver.ConsistencyGroupVD,
driver.ManageableVD,
driver.ExtendVD,
driver.TransferVD,
driver.SnapshotVD):
"""NetApp 7-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_7mode_direct'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION
def __init__(self, *args, **kwargs):
super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
return self.library.update_consistencygroup(group, add_volumes=None,
remove_volumes=None)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
def failover_host(self, context, volumes, secondary_id=None):
raise NotImplementedError()
| apache-2.0 | 6,472,076,403,904,274,000 | 39.094203 | 78 | 0.657509 | false |
taedori81/shoop | shoop/admin/modules/orders/views/detail.py | 2 | 3148 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
from django.views.generic import DetailView
from shoop.admin.toolbar import PostActionButton, Toolbar, URLActionButton
from shoop.admin.utils.urls import get_model_url
from shoop.core.models import Order, OrderStatus, OrderStatusRole
from shoop.utils.excs import Problem
class OrderDetailView(DetailView):
model = Order
template_name = "shoop/admin/orders/detail.jinja"
context_object_name = "order"
def get_toolbar(self):
order = self.object
toolbar = Toolbar()
toolbar.append(URLActionButton(
text=_("Create Shipment"),
icon="fa fa-truck",
disable_reason=_("There are no products to ship") if not order.get_unshipped_products() else None,
url=reverse("shoop_admin:order.create-shipment", kwargs={"pk": order.pk}),
extra_css_class="btn-info"
))
toolbar.append(PostActionButton(
post_url=reverse("shoop_admin:order.set-status", kwargs={"pk": order.pk}),
name="status",
value=OrderStatus.objects.get_default_complete().pk,
text=_("Set Complete"),
icon="fa fa-check-circle",
disable_reason=(
_("This order can not be set as complete at this point")
if not order.can_set_complete()
else None
),
extra_css_class="btn-success"
))
# TODO: Add extensibility to action_button_groups?
return toolbar
def get_context_data(self, **kwargs):
context = super(OrderDetailView, self).get_context_data(**kwargs)
context["toolbar"] = self.get_toolbar()
context["title"] = force_text(self.object)
return context
class OrderSetStatusView(DetailView):
model = Order
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(get_model_url(self.get_object()))
def post(self, request, *args, **kwargs):
order = self.object = self.get_object()
new_status = OrderStatus.objects.get(pk=int(request.POST["status"]))
if new_status.role == OrderStatusRole.COMPLETE and not order.can_set_complete():
raise Problem(_("Unable to set order as completed at this point"))
old_status = order.status
order.status = new_status
order.save(update_fields=("status",))
message = _("Order status changed: %s to %s") % (old_status, new_status)
order.add_log_entry(message, user=request.user, identifier="status_change")
messages.success(self.request, message)
return HttpResponseRedirect(get_model_url(self.get_object()))
| agpl-3.0 | -7,167,904,037,655,316,000 | 37.864198 | 110 | 0.656607 | false |
Sterlingcoin/Sterlingcoin-1.4-Lite-Edition-Linux-Source | src/qt/res/themes/qdarkstyle/__init__.py | 4 | 3636 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) <2013-2014> <Colin Duquesnoy>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Initialise the QDarkStyleSheet module when used with python.
This module provides a function to transparently load the stylesheets
with the correct rc file.
"""
import logging
import platform
__version__ = "1.12"
def _logger():
return logging.getLogger('qdarkstyle')
def load_stylesheet(pyside=True):
"""
Loads the stylesheet. Takes care of importing the rc module.
:param pyside: True to load the pyside rc file, False to load the PyQt rc file
:return the stylesheet string
"""
# Smart import of the rc file
if pyside:
import qdarkstyle.pyside_style_rc
else:
import qdarkstyle.pyqt_style_rc
# Load the stylesheet content from resources
if not pyside:
from PyQt4.QtCore import QFile, QTextStream
else:
from PySide.QtCore import QFile, QTextStream
f = QFile(":qdarkstyle/style.qss")
if not f.exists():
_logger().error("Unable to load stylesheet, file not found in "
"resources")
return ""
else:
f.open(QFile.ReadOnly | QFile.Text)
ts = QTextStream(f)
stylesheet = ts.readAll()
if platform.system().lower() == 'darwin': # see issue #12 on github
mac_fix = '''
QDockWidget::title
{
background-color: #353434;
text-align: center;
}
'''
stylesheet += mac_fix
return stylesheet
def load_stylesheet_pyqt5():
"""
    Loads the stylesheet for use in a PyQt5 application; takes no parameters.
:return the stylesheet string
"""
# Smart import of the rc file
import qdarkstyle.pyqt5_style_rc
# Load the stylesheet content from resources
from PyQt5.QtCore import QFile, QTextStream
f = QFile(":qdarkstyle/style.qss")
if not f.exists():
_logger().error("Unable to load stylesheet, file not found in "
"resources")
return ""
else:
f.open(QFile.ReadOnly | QFile.Text)
ts = QTextStream(f)
stylesheet = ts.readAll()
if platform.system().lower() == 'darwin': # see issue #12 on github
mac_fix = '''
QDockWidget::title
{
background-color: #353434;
text-align: center;
}
'''
stylesheet += mac_fix
return stylesheet | mit | 4,703,372,961,797,003,000 | 30.626087 | 82 | 0.643839 | false |
airbnb/superset | superset/db_engine_specs/exasol.py | 4 | 1847 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, List, Optional, Tuple
from superset.db_engine_specs.base import BaseEngineSpec
class ExasolEngineSpec(BaseEngineSpec): # pylint: disable=abstract-method
"""Engine spec for Exasol"""
engine = "exa"
engine_name = "Exasol"
max_column_name_length = 128
    # Exasol's DATE_TRUNC function is PostgreSQL compatible
_time_grain_expressions = {
None: "{col}",
"PT1S": "DATE_TRUNC('second', {col})",
"PT1M": "DATE_TRUNC('minute', {col})",
"PT1H": "DATE_TRUNC('hour', {col})",
"P1D": "DATE_TRUNC('day', {col})",
"P1W": "DATE_TRUNC('week', {col})",
"P1M": "DATE_TRUNC('month', {col})",
"P0.25Y": "DATE_TRUNC('quarter', {col})",
"P1Y": "DATE_TRUNC('year', {col})",
}
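    # For illustration: applying the "P1D" grain to a column named "ts"
    # (an arbitrary example name) yields "DATE_TRUNC('day', ts)".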
@classmethod
def fetch_data(
cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
data = super().fetch_data(cursor, limit)
# Lists of `pyodbc.Row` need to be unpacked further
return cls.pyodbc_rows_to_tuples(data)
| apache-2.0 | 4,776,618,002,529,822,000 | 37.479167 | 74 | 0.662696 | false |
jianghuaw/nova | nova/tests/unit/api/openstack/compute/test_snapshots.py | 4 | 6622 | # Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import volumes as volumes_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.volume import cinder
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class SnapshotApiTestV21(test.NoDBTestCase):
controller = volumes_v21.SnapshotController()
validation_error = exception.ValidationError
def setUp(self):
super(SnapshotApiTestV21, self).setUp()
fakes.stub_out_networking(self)
self.stub_out("nova.volume.cinder.API.create_snapshot",
fakes.stub_snapshot_create)
self.stub_out("nova.volume.cinder.API.create_snapshot_force",
fakes.stub_snapshot_create)
self.stub_out("nova.volume.cinder.API.delete_snapshot",
fakes.stub_snapshot_delete)
self.stub_out("nova.volume.cinder.API.get_snapshot",
fakes.stub_snapshot_get)
self.stub_out("nova.volume.cinder.API.get_all_snapshots",
fakes.stub_snapshot_get_all)
self.stub_out("nova.volume.cinder.API.get", fakes.stub_volume_get)
self.req = fakes.HTTPRequest.blank('')
def _test_snapshot_create(self, force):
snapshot = {"volume_id": '12',
"force": force,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
resp_dict = self.controller.create(self.req, body=body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot['display_name'],
resp_dict['snapshot']['displayName'])
self.assertEqual(snapshot['display_description'],
resp_dict['snapshot']['displayDescription'])
self.assertEqual(snapshot['volume_id'],
resp_dict['snapshot']['volumeId'])
def test_snapshot_create(self):
self._test_snapshot_create(False)
def test_snapshot_create_force(self):
self._test_snapshot_create(True)
def test_snapshot_create_invalid_force_param(self):
body = {'snapshot': {'volume_id': '1',
'force': '**&&^^%%$$##@@'}}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_snapshot_delete(self):
snapshot_id = '123'
delete = self.controller.delete
result = delete(self.req, snapshot_id)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller, volumes_v21.SnapshotController):
status_int = delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
@mock.patch.object(cinder.API, 'delete_snapshot',
side_effect=exception.SnapshotNotFound(snapshot_id=FAKE_UUID))
def test_delete_snapshot_not_exists(self, mock_mr):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, FAKE_UUID)
def test_snapshot_delete_invalid_id(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, '-1')
def test_snapshot_show(self):
snapshot_id = '123'
resp_dict = self.controller.show(self.req, snapshot_id)
self.assertIn('snapshot', resp_dict)
self.assertEqual(str(snapshot_id), resp_dict['snapshot']['id'])
def test_snapshot_show_invalid_id(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
self.req, '-1')
def test_snapshot_detail(self):
resp_dict = self.controller.detail(self.req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(3, len(resp_snapshots))
resp_snapshot = resp_snapshots.pop()
self.assertEqual(102, resp_snapshot['id'])
def test_snapshot_detail_offset_and_limit(self):
path = '/v2/fake/os-snapshots/detail?offset=1&limit=1'
req = fakes.HTTPRequest.blank(path)
resp_dict = self.controller.detail(req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(1, len(resp_snapshots))
resp_snapshot = resp_snapshots.pop()
self.assertEqual(101, resp_snapshot['id'])
def test_snapshot_index(self):
resp_dict = self.controller.index(self.req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(3, len(resp_snapshots))
def test_snapshot_index_offset_and_limit(self):
path = '/v2/fake/os-snapshots?offset=1&limit=1'
req = fakes.HTTPRequest.blank(path)
resp_dict = self.controller.index(req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(1, len(resp_snapshots))
class TestSnapshotAPIDeprecation(test.NoDBTestCase):
def setUp(self):
super(TestSnapshotAPIDeprecation, self).setUp()
self.controller = volumes_v21.SnapshotController()
self.req = fakes.HTTPRequest.blank('', version='2.36')
def test_all_apis_return_not_found(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.index, self.req)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.create, self.req, {})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.detail, self.req)
| apache-2.0 | -1,050,108,856,147,623,800 | 40.3875 | 78 | 0.647539 | false |
rootmos/ldapy | test/test_ldapy.py | 1 | 21501 | import unittest2
import mock
import configuration
from ldapy.node import NodeError
from ldapy.ldapy import Ldapy, AlreadyAtRoot, SetAttributeError, DeleteError
from ldapy.exceptions import NoSuchObject, NoSuchObjectInRoot
import io
import tempfile
from ldapy.connection_data import *
class BasicLdapyTests (unittest2.TestCase):
def setUp (self):
self.con = configuration.getConnection ()
# Prepare by setting the historyFile to a temporary file
self.historyFile = tempfile.NamedTemporaryFile()
ConnectionDataManager.filename = self.historyFile.name
def getLdapyAtRoot (self):
with configuration.provision() as p:
ldapy = Ldapy(self.con)
self.root = p.root
ldapy.changeDN (self.root)
return ldapy
def test_list_roots (self):
ldapy = Ldapy (self.con)
with configuration.provision() as p:
self.assertIn (p.root, ldapy.children)
def test_change_DN_to_root (self):
ldapy = Ldapy(self.con)
with configuration.provision() as p:
root = p.root
ldapy.changeDN (root)
self.assertEqual (root, ldapy.cwd)
self.assertIn ("top", ldapy.attributes["objectClass"])
def test_change_DN_up_one_level (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
c1 = p.container()
ldapy.changeDN(c1.rdn)
c2 = p.container(c1)
ldapy.changeDN(c2.rdn)
self.assertEqual (ldapy.cwd, c2.dn)
ldapy.goUpOneLevel ()
self.assertEqual (ldapy.cwd, c1.dn)
def test_getAttributes_self_and_parent (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
c1 = p.container(dnComponent="o", objectClass="organization")
ldapy.changeDN(c1.rdn)
c2 = p.container(c1)
ldapy.changeDN(c2.rdn)
self.assertIn (c2.objectClass, ldapy.getAttributes (".")["objectClass"])
self.assertIn (c1.objectClass, ldapy.getAttributes ("..")["objectClass"])
def test_superroot_has_empty_attributes (self):
ldapy = Ldapy (self.con)
self.assertDictEqual ({}, ldapy.attributes)
def test_setAttribute_calls_setAttribute_on_node (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
with mock.patch('ldapy.node.Node.setAttribute', autospec=True) as setterMock:
l = p.leaf()
attribute = "description"
oldValue = "test_setAttribute_calls_setAttribute_on_node_old"
newValue = "test_setAttribute_calls_setAttribute_on_node_new"
child = ldapy._resolveRelativeDN (l.rdn)
ldapy.setAttribute (l.rdn, attribute,
newValue = newValue, oldValue = oldValue)
setterMock.assert_called_once_with (child, attribute,
newValue = newValue, oldValue = oldValue)
def test_delete_calls_delete_on_node (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
with mock.patch('ldapy.node.Node.delete', autospec=True) as deleteMock:
l = p.leaf()
child = ldapy._resolveRelativeDN (l.rdn)
ldapy.delete (l.rdn)
deleteMock.assert_called_once_with (child)
def test_add_calls_add_on_node (self):
ldapy = self.getLdapyAtRoot()
with mock.patch('ldapy.node.Node.add', autospec=True) as addMock:
cwd = ldapy._resolveRelativeDN (".")
rdn = "cn=Foo"
attr = {"objectClass": "Bar"}
ldapy.add (rdn, attr)
addMock.assert_called_once_with (cwd, rdn, attr)
def test_successful_connection_calls_addRecentConnection (self):
connectionData = ConnectionData (configuration.uri,
configuration.admin,
configuration.admin_password)
with mock.patch("ldapy.ldapy.Ldapy.parseArguments", autospec=True) as parseArgumentsMock:
with mock.patch("ldapy.connection_data.ConnectionDataManager.addRecentConnection", autospec=True) as addRecentConnectionMock:
parseArgumentsMock.return_value = (connectionData, True)
ldapy = Ldapy()
addRecentConnectionMock.assert_called_once_with (ldapy._lazyConnectionDataManager, connectionData)
class ChildCompleter (unittest2.TestCase):
def setUp (self):
self.con = configuration.getConnection ()
def getLdapyAtRoot (self):
with configuration.provision() as p:
ldapy = Ldapy(self.con)
self.root = p.root
ldapy.changeDN (self.root)
return ldapy
def test_empty_input (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
c = p.container()
ldapy.changeDN(c.rdn)
l1 = p.leaf(c)
l2 = p.leaf(c)
matches = ldapy.completeChild ("")
self.assertListEqual(sorted([l1.rdn, l2.rdn]), sorted(matches))
def test_matches_several (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
c = p.container()
ldapy.changeDN(c.rdn)
c0 = p.container(c)
l1 = p.leaf(c)
l2 = p.leaf(c)
matches = ldapy.completeChild ("%s=" % l1.dnComponent)
self.assertListEqual(sorted([l1.rdn, l2.rdn]), sorted(matches))
def test_matches_unique (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
c = p.container()
ldapy.changeDN(c.rdn)
l1 = p.leaf(c)
l2 = p.leaf(c)
matches = ldapy.completeChild (l1.rdn[:-1])
self.assertListEqual (matches, [l1.rdn])
matches = ldapy.completeChild (l2.rdn[:-1])
self.assertListEqual (matches, [l2.rdn])
def test_no_matches (self):
matches = Ldapy(self.con).completeChild ("dc=nonexistent")
self.assertListEqual (matches, [])
class ErrorLdapyTests (unittest2.TestCase):
def setUp (self):
self.con = configuration.getConnection ()
# Prepare by setting the historyFile to a temporary file
self.historyFile = tempfile.NamedTemporaryFile()
ConnectionDataManager.filename = self.historyFile.name
def getLdapyAtRoot (self):
with configuration.provision() as p:
ldapy = Ldapy(self.con)
self.root = p.root
ldapy.changeDN (self.root)
return ldapy
def test_change_DN_to_nonexistent_root (self):
ldapy = Ldapy (self.con)
nonexistent = "dc=nonexistent"
with self.assertRaises(NoSuchObjectInRoot) as received:
ldapy.changeDN (nonexistent)
expected = NoSuchObjectInRoot (nonexistent)
self.assertEqual (str(received.exception), str(expected))
def test_change_DN_to_nonexistent_child (self):
with configuration.provision() as p:
ldapy = Ldapy (self.con)
ldapy.changeDN (p.root)
nonexistentRDN = "ou=Foobar"
nonexistent = "%s,%s" % (nonexistentRDN, p.root)
with self.assertRaises(NoSuchObject) as received:
ldapy.changeDN (nonexistentRDN)
expected = NoSuchObject (nonexistent)
self.assertEqual (str(received.exception), str(expected))
def test_up_one_level_too_far (self):
ldapy = Ldapy (self.con)
with self.assertRaises(AlreadyAtRoot) as received:
ldapy.goUpOneLevel ()
expected = AlreadyAtRoot ()
self.assertEqual (str(received.exception), str(expected))
def test_NoSuchObject_for_setAttribute (self):
ldapy = self.getLdapyAtRoot()
nonexistentRDN = "dc=nonexistent"
nonexistent = "%s,%s" % (nonexistentRDN, ldapy.cwd)
attribute = "description"
with self.assertRaises(NoSuchObject) as received:
ldapy.setAttribute (nonexistentRDN, attribute)
expected = NoSuchObject (nonexistent)
self.assertEqual (str(received.exception), str(expected))
def test_setAttribute_errors_are_propagated (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
l = p.leaf()
testMessage = "test_setAttribute_errors_are_propagated_msg"
with mock.patch('ldapy.node.Node.setAttribute',
side_effect=NodeError(None, testMessage)):
with self.assertRaises(SetAttributeError) as received:
ldapy.setAttribute (l.rdn, "attribute")
self.assertEqual (received.exception.msg, testMessage)
self.assertEqual (str(received.exception), testMessage)
def test_NoSuchObject_for_delete (self):
ldapy = self.getLdapyAtRoot()
nonexistentRDN = "dc=nonexistent"
nonexistent = "%s,%s" % (nonexistentRDN, ldapy.cwd)
with self.assertRaises(NoSuchObject) as received:
ldapy.delete (nonexistentRDN)
expected = NoSuchObject (nonexistent)
self.assertEqual (str(received.exception), str(expected))
def test_delete_errors_are_propagated (self):
ldapy = self.getLdapyAtRoot()
with configuration.provision() as p:
l = p.leaf()
testMessage = "test_delete_errors_are_propagated_msg"
with mock.patch('ldapy.node.Node.delete',
side_effect=NodeError(None, testMessage)):
with self.assertRaises(DeleteError) as received:
ldapy.delete (l.rdn)
self.assertEqual (received.exception.msg, testMessage)
self.assertEqual (str(received.exception), testMessage)
def test_failed_connection_does_not_call_addRecentConnection (self):
connectionData = ConnectionData ("ldap://foo", configuration.admin, configuration.admin_password)
with mock.patch("ldapy.ldapy.Ldapy.parseArguments", autospec=True) as parseArgumentsMock:
with mock.patch("ldapy.connection_data.ConnectionDataManager.addRecentConnection", autospec=True) as addRecentConnectionMock:
parseArgumentsMock.return_value = (connectionData, True)
try:
ldapy = Ldapy()
# Expect that Ldapy's constructor calls sys.exit
self.assertTrue(False)
except SystemExit:
self.assertFalse (addRecentConnectionMock.called)
def assertSystemExitStatus (test, e, code):
if hasattr(e, "code"):
test.assertEqual (e.code, code)
else:
test.assertEqual (e, code)
class ArgumentParserTests (unittest2.TestCase):
def setUp (self):
self.con = configuration.getConnection ()
# Prepare by setting the historyFile to a temporary file
self.historyFile = tempfile.NamedTemporaryFile()
ConnectionDataManager.filename = self.historyFile.name
def test_successful_parse_with_host (self):
ldapy = Ldapy (self.con)
host = "localhost"
port = 7
uri = "ldap://%s:%u" % (host, port)
bind_dn = "cn=admin"
password = "foobar"
args = ["-H", host, "-p", str(port), "-D", bind_dn, "-w", password]
connectionData, _ = ldapy.parseArguments (args)
self.assertEqual (connectionData.uri, uri)
self.assertEqual (connectionData.bind_dn, bind_dn)
self.assertEqual (connectionData.password, password)
def test_successful_parse_with_uri (self):
ldapy = Ldapy (self.con)
host = "localhost"
port = 7
uri = "ldap://%s:%u" % (host, port)
bind_dn = "cn=admin"
password = "foobar"
args = ["ldap://%s:%s" % (host, port), "-D", bind_dn, "-w", password]
connectionData, _ = ldapy.parseArguments (args)
self.assertEqual (connectionData.uri, uri)
self.assertEqual (connectionData.bind_dn, bind_dn)
self.assertEqual (connectionData.password, password)
def test_neither_host_nor_uri_is_specified_and_no_recent_connection (self):
ldapy = Ldapy (self.con)
with mock.patch('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments ([])
self.assertIn(ldapy._neither_host_nor_uri_given, output.getvalue())
assertSystemExitStatus(self, e.exception, 2)
def test_both_host_and_uri_is_specified (self):
ldapy = Ldapy (self.con)
with mock.patch('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["-H", "foo", "ldap://bar"])
self.assertIn(ldapy._both_host_and_uri_given, output.getvalue())
assertSystemExitStatus(self, e.exception, 2)
def test_malformed_uri (self):
ldapy = Ldapy (self.con)
with mock.patch('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["foobar://lars"])
self.assertIn(ldapy._uri_malformed, output.getvalue())
assertSystemExitStatus(self, e.exception, 2)
def test_port_invalid_number (self):
ldapy = Ldapy (self.con)
with mock.patch('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["-H", "foo", "-p", "-1"])
self.assertIn(ldapy._port_is_not_a_valid_number, output.getvalue())
assertSystemExitStatus(self, e.exception, 2)
with mock.patch('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["-H", "foo", "-p", str(0xffff + 1)])
self.assertIn(ldapy._port_is_not_a_valid_number, output.getvalue())
assertSystemExitStatus(self, e.exception, 2)
def test_previous_connection (self):
ldapy = Ldapy (self.con)
getter = mock.create_autospec (ldapy.connectionDataManager.getRecentConnection)
getter.return_value = {}
ldapy.connectionDataManager.getRecentConnection = getter
N = 7
connectionData, new = ldapy.parseArguments (["--previous", str(N)])
getter.assert_called_once_with (N)
self.assertIs (connectionData, getter.return_value)
self.assertFalse (new)
def test_no_such_previous_connection (self):
ldapy = Ldapy (self.con)
N = 7
with mock.patch ('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--previous", str(N)])
msg = str(NoSuchRecentConnection(N))
self.assertIn (msg, output.getvalue())
assertSystemExitStatus(self, e.exception, 3)
def test_list_previous_connections (self):
ldapy = Ldapy (self.con)
getter = mock.create_autospec (ldapy.connectionDataManager.getRecentConnections)
ldapy.connectionDataManager.getRecentConnections = getter
a = ConnectionData("ldap://a.com", "cn=a")
b = ConnectionData("ldap://b.com", "cn=b")
getter.return_value = [a, b]
with mock.patch ('sys.stdout', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["-P"])
assertSystemExitStatus(self, e.exception, 0)
lines = output.getvalue().splitlines()
self.assertIn (a.uri, lines[0])
self.assertIn (a.bind_dn, lines[0])
self.assertIn (b.uri, lines[1])
self.assertIn (b.bind_dn, lines[1])
def test_previous_connection_with_too_many_arguments (self):
ldapy = Ldapy (self.con)
with mock.patch ('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--previous", "6", "7"])
assertSystemExitStatus(self, e.exception, 2)
self.assertIn (ldapy._too_many_arguments, output.getvalue())
def test_saved_connection (self):
ldapy = Ldapy (self.con)
getter = mock.create_autospec (ldapy.connectionDataManager.getConnection)
getter.return_value = {}
ldapy.connectionDataManager.getConnection = getter
name = "foo"
connectionData, new = ldapy.parseArguments (["--saved", name])
getter.assert_called_once_with (name)
self.assertIs (connectionData, getter.return_value)
self.assertFalse (new)
def test_no_such_saved_connection (self):
ldapy = Ldapy (self.con)
name = "foo"
with mock.patch ('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["-S", name])
msg = str(NoSuchSavedConnection(name))
self.assertIn (msg, output.getvalue())
assertSystemExitStatus(self, e.exception, 3)
def test_list_saved_connections (self):
ldapy = Ldapy (self.con)
getter = mock.create_autospec (ldapy.connectionDataManager.getConnections)
ldapy.connectionDataManager.getConnections = getter
nameA = "nameA"
a = ConnectionData("ldap://a.com", "cn=a")
nameB = "nameB"
b = ConnectionData("ldap://b.com", "cn=b")
getter.return_value = {nameA:a, nameB:b}
with mock.patch ('sys.stdout', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--saved"])
assertSystemExitStatus(self, e.exception, 0)
lines = output.getvalue().splitlines()
self.assertIn (nameA, lines[0])
self.assertIn (a.uri, lines[0])
self.assertIn (a.bind_dn, lines[0])
self.assertIn (nameB, lines[1])
self.assertIn (b.uri, lines[1])
self.assertIn (b.bind_dn, lines[1])
def test_saved_connection_with_too_many_arguments (self):
ldapy = Ldapy (self.con)
with mock.patch ('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--saved", "foo", "bar"])
assertSystemExitStatus(self, e.exception, 2)
self.assertIn (ldapy._too_many_arguments, output.getvalue())
def test_save_connection (self):
ldapy = Ldapy (self.con)
getter = mock.create_autospec (ldapy.connectionDataManager.getRecentConnection)
getter.return_value = {}
ldapy.connectionDataManager.getRecentConnection = getter
saver = mock.create_autospec (ldapy.connectionDataManager.saveConnection)
ldapy.connectionDataManager.saveConnection = saver
N = 7
name = "foo"
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--save", str(N), name])
assertSystemExitStatus(self, e.exception, 0)
getter.assert_called_once_with (N)
saver.assert_called_once_with (name, getter.return_value)
def test_save_with_no_such_connection (self):
ldapy = Ldapy (self.con)
N = 7
name = "foo"
with mock.patch ('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--save", str(N), name])
msg = str(NoSuchRecentConnection(N))
self.assertIn (msg, output.getvalue())
assertSystemExitStatus(self, e.exception, 3)
def test_save_with_not_a_number (self):
ldapy = Ldapy (self.con)
with mock.patch ('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--save", "foo", "bar"])
self.assertIn (Ldapy._first_argument_must_be_a_number, output.getvalue())
assertSystemExitStatus(self, e.exception, 2)
def test_remove_connection (self):
ldapy = Ldapy (self.con)
remover = mock.create_autospec (ldapy.connectionDataManager.removeConnection)
ldapy.connectionDataManager.removeConnection = remover
name = "foo"
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--remove", name])
assertSystemExitStatus(self, e.exception, 0)
remover.assert_called_once_with (name)
def test_remove_with_no_such_connection (self):
ldapy = Ldapy (self.con)
name = "foo"
with mock.patch ('sys.stderr', new_callable=io.BytesIO) as output:
with self.assertRaises(SystemExit) as e:
ldapy.parseArguments (["--remove", name])
msg = str(NoSuchSavedConnection(name))
self.assertIn (msg, output.getvalue())
assertSystemExitStatus(self, e.exception, 3)
def test_no_uri_or_host_defaults_to_last_connection (self):
ldapy = Ldapy (self.con)
getter = mock.create_autospec (ldapy.connectionDataManager.getRecentConnection)
getter.return_value = {}
ldapy.connectionDataManager.getRecentConnection = getter
connectionData, new = ldapy.parseArguments ([])
getter.assert_called_once_with ()
self.assertIs (connectionData, getter.return_value)
self.assertFalse (new)
| gpl-3.0 | 2,128,548,720,864,343,800 | 35.753846 | 137 | 0.619832 | false |
Arthaey/anki | aqt/toolbar.py | 18 | 4335 | # Copyright: Damien Elmes <[email protected]>
# -*- coding: utf-8 -*-
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from aqt.qt import *
class Toolbar(object):
def __init__(self, mw, web):
self.mw = mw
self.web = web
self.web.page().mainFrame().setScrollBarPolicy(
Qt.Vertical, Qt.ScrollBarAlwaysOff)
self.web.setLinkHandler(self._linkHandler)
self.link_handlers = {
"decks": self._deckLinkHandler,
"study": self._studyLinkHandler,
"add": self._addLinkHandler,
"browse": self._browseLinkHandler,
"stats": self._statsLinkHandler,
"sync": self._syncLinkHandler,
}
def draw(self):
self.web.stdHtml(self._body % (
# may want a context menu here in the future
' '*20,
self._centerLinks(),
self._rightIcons()),
self._css)
# Available links
######################################################################
def _rightIconsList(self):
return [
["stats", "qrc:/icons/view-statistics.png",
_("Show statistics. Shortcut key: %s") % "Shift+S"],
["sync", "qrc:/icons/view-refresh.png",
_("Synchronize with AnkiWeb. Shortcut key: %s") % "Y"],
]
def _centerLinks(self):
links = [
["decks", _("Decks"), _("Shortcut key: %s") % "D"],
["add", _("Add"), _("Shortcut key: %s") % "A"],
["browse", _("Browse"), _("Shortcut key: %s") % "B"],
]
return self._linkHTML(links)
def _linkHTML(self, links):
buf = ""
for ln, name, title in links:
buf += '<a class=hitem title="%s" href="%s">%s</a>' % (
title, ln, name)
buf += " "*3
return buf
def _rightIcons(self):
buf = ""
for ln, icon, title in self._rightIconsList():
buf += '<a class=hitem title="%s" href="%s"><img width="16px" height="16px" src="%s"></a>' % (
title, ln, icon)
return buf
# Link handling
######################################################################
def _linkHandler(self, link):
# first set focus back to main window, or we're left with an ugly
# focus ring around the clicked item
self.mw.web.setFocus()
if link in self.link_handlers:
self.link_handlers[link]()
def _deckLinkHandler(self):
self.mw.moveToState("deckBrowser")
def _studyLinkHandler(self):
# if overview already shown, switch to review
if self.mw.state == "overview":
self.mw.col.startTimebox()
self.mw.moveToState("review")
else:
self.mw.onOverview()
def _addLinkHandler(self):
self.mw.onAddCard()
def _browseLinkHandler(self):
self.mw.onBrowse()
def _statsLinkHandler(self):
self.mw.onStats()
def _syncLinkHandler(self):
self.mw.onSync()
# HTML & CSS
######################################################################
_body = """
<table id=header width=100%%>
<tr>
<td width=16%% align=left>%s</td>
<td align=center>%s</td>
<td width=15%% align=right>%s</td>
</tr></table>
"""
_css = """
#header {
margin:0;
margin-top: 4px;
font-weight: bold;
}
html {
height: 100%;
background: -webkit-gradient(linear, left top, left bottom,
from(#ddd), to(#fff));
margin:0; padding:0;
}
body {
margin:0; padding:0;
position:absolute;
top:0;left:0;right:0;bottom:0;
-webkit-user-select: none;
border-bottom: 1px solid #aaa;
}
* { -webkit-user-drag: none; }
.hitem {
padding-right: 6px;
text-decoration: none;
color: #000;
}
.hitem:hover {
text-decoration: underline;
}
"""
class BottomBar(Toolbar):
_css = Toolbar._css + """
#header {
background: -webkit-gradient(linear, left top, left bottom,
from(#fff), to(#ddd));
border-bottom: 0;
border-top: 1px solid #aaa;
margin-bottom: 6px;
margin-top: 0;
}
"""
_centerBody = """
<center><table width=100%% height=100%% id=header><tr><td align=center>
%s</td></tr></table></center>
"""
def draw(self, buf):
self.web.show()
self.web.stdHtml(
self._centerBody % buf,
self._css)
| agpl-3.0 | -1,603,127,015,484,851,000 | 24.958084 | 106 | 0.52526 | false |
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/spillover/spilloverpolicy_binding.py | 3 | 4023 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class spilloverpolicy_binding(base_resource):
""" Binding class showing the resources that can be bound to spilloverpolicy_binding.
"""
def __init__(self) :
self._name = ""
self.spilloverpolicy_gslbvserver_binding = []
self.spilloverpolicy_csvserver_binding = []
self.spilloverpolicy_lbvserver_binding = []
@property
def name(self) :
ur"""Name of the spillover policy.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the spillover policy.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def spilloverpolicy_csvserver_bindings(self) :
ur"""csvserver that can be bound to spilloverpolicy.
"""
try :
return self._spilloverpolicy_csvserver_binding
except Exception as e:
raise e
@property
def spilloverpolicy_lbvserver_bindings(self) :
ur"""lbvserver that can be bound to spilloverpolicy.
"""
try :
return self._spilloverpolicy_lbvserver_binding
except Exception as e:
raise e
@property
def spilloverpolicy_gslbvserver_bindings(self) :
ur"""gslbvserver that can be bound to spilloverpolicy.
"""
try :
return self._spilloverpolicy_gslbvserver_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(spilloverpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.spilloverpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
ur""" Use this API to fetch spilloverpolicy_binding resource.
"""
try :
if type(name) is not list :
obj = spilloverpolicy_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
					obj = [spilloverpolicy_binding() for _ in range(len(name))]
					response = [None for _ in range(len(name))]
					for i in range(len(name)) :
						obj[i].name = name[i]
						response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
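	# Illustrative usage (the session object and policy name are hypothetical,
	# not taken from this SDK file):
	#   binding = spilloverpolicy_binding.get(nitro_session, "my-spillover-policy")
	#   lb_bindings = binding.spilloverpolicy_lbvserver_bindings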
class spilloverpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.spilloverpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.spilloverpolicy_binding = [spilloverpolicy_binding() for _ in range(length)]
| apache-2.0 | -3,365,258,356,310,001,700 | 29.022388 | 125 | 0.710167 | false |
SciTools/iris | lib/iris/tests/unit/fileformats/netcdf/test__load_aux_factory.py | 3 | 8075 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.fileformats.netcdf._load_aux_factory` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from unittest import mock
import numpy as np
import warnings
from iris.coords import DimCoord
from iris.cube import Cube
from iris.fileformats.netcdf import _load_aux_factory
class TestAtmosphereHybridSigmaPressureCoordinate(tests.IrisTest):
def setUp(self):
standard_name = "atmosphere_hybrid_sigma_pressure_coordinate"
self.requires = dict(formula_type=standard_name)
self.ap = mock.MagicMock(units="units")
self.ps = mock.MagicMock(units="units")
coordinates = [(mock.sentinel.b, "b"), (self.ps, "ps")]
self.cube_parts = dict(coordinates=coordinates)
self.engine = mock.Mock(
requires=self.requires, cube_parts=self.cube_parts
)
self.cube = mock.create_autospec(Cube, spec_set=True, instance=True)
# Patch out the check_dependencies functionality.
func = "iris.aux_factory.HybridPressureFactory._check_dependencies"
patcher = mock.patch(func)
patcher.start()
self.addCleanup(patcher.stop)
def test_formula_terms_ap(self):
self.cube_parts["coordinates"].append((self.ap, "ap"))
self.requires["formula_terms"] = dict(ap="ap", b="b", ps="ps")
_load_aux_factory(self.engine, self.cube)
# Check cube.add_aux_coord method.
self.assertEqual(self.cube.add_aux_coord.call_count, 0)
# Check cube.add_aux_factory method.
self.assertEqual(self.cube.add_aux_factory.call_count, 1)
args, _ = self.cube.add_aux_factory.call_args
self.assertEqual(len(args), 1)
factory = args[0]
self.assertEqual(factory.delta, self.ap)
self.assertEqual(factory.sigma, mock.sentinel.b)
self.assertEqual(factory.surface_air_pressure, self.ps)
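    # Background for the a/p0 tests below (CF conventions, not stated in this
    # file): the hybrid sigma-pressure coordinate defines p = ap + b * ps, and
    # when the "a"/"p0" form is used ap = a * p0, which is why the expected
    # vertical pressure is np.arange(5) * 10.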
def test_formula_terms_a_p0(self):
coord_a = DimCoord(np.arange(5), units="1")
coord_p0 = DimCoord(10, units="Pa")
coord_expected = DimCoord(
np.arange(5) * 10,
units="Pa",
long_name="vertical pressure",
var_name="ap",
)
self.cube_parts["coordinates"].extend(
[(coord_a, "a"), (coord_p0, "p0")]
)
self.requires["formula_terms"] = dict(a="a", b="b", ps="ps", p0="p0")
_load_aux_factory(self.engine, self.cube)
# Check cube.coord_dims method.
self.assertEqual(self.cube.coord_dims.call_count, 1)
args, _ = self.cube.coord_dims.call_args
self.assertEqual(len(args), 1)
self.assertIs(args[0], coord_a)
# Check cube.add_aux_coord method.
self.assertEqual(self.cube.add_aux_coord.call_count, 1)
args, _ = self.cube.add_aux_coord.call_args
self.assertEqual(len(args), 2)
self.assertEqual(args[0], coord_expected)
self.assertIsInstance(args[1], mock.Mock)
# Check cube.add_aux_factory method.
self.assertEqual(self.cube.add_aux_factory.call_count, 1)
args, _ = self.cube.add_aux_factory.call_args
self.assertEqual(len(args), 1)
factory = args[0]
self.assertEqual(factory.delta, coord_expected)
self.assertEqual(factory.sigma, mock.sentinel.b)
self.assertEqual(factory.surface_air_pressure, self.ps)
def test_formula_terms_a_p0__promote_a_units_unknown_to_dimensionless(
self,
):
coord_a = DimCoord(np.arange(5), units="unknown")
coord_p0 = DimCoord(10, units="Pa")
coord_expected = DimCoord(
np.arange(5) * 10,
units="Pa",
long_name="vertical pressure",
var_name="ap",
)
self.cube_parts["coordinates"].extend(
[(coord_a, "a"), (coord_p0, "p0")]
)
self.requires["formula_terms"] = dict(a="a", b="b", ps="ps", p0="p0")
_load_aux_factory(self.engine, self.cube)
# Check cube.coord_dims method.
self.assertEqual(self.cube.coord_dims.call_count, 1)
args, _ = self.cube.coord_dims.call_args
self.assertEqual(len(args), 1)
self.assertIs(args[0], coord_a)
self.assertEqual("1", args[0].units)
# Check cube.add_aux_coord method.
self.assertEqual(self.cube.add_aux_coord.call_count, 1)
args, _ = self.cube.add_aux_coord.call_args
self.assertEqual(len(args), 2)
self.assertEqual(args[0], coord_expected)
self.assertIsInstance(args[1], mock.Mock)
# Check cube.add_aux_factory method.
self.assertEqual(self.cube.add_aux_factory.call_count, 1)
args, _ = self.cube.add_aux_factory.call_args
self.assertEqual(len(args), 1)
factory = args[0]
self.assertEqual(factory.delta, coord_expected)
self.assertEqual(factory.sigma, mock.sentinel.b)
self.assertEqual(factory.surface_air_pressure, self.ps)
def test_formula_terms_p0_non_scalar(self):
coord_p0 = DimCoord(np.arange(5))
self.cube_parts["coordinates"].append((coord_p0, "p0"))
self.requires["formula_terms"] = dict(p0="p0")
with self.assertRaises(ValueError):
_load_aux_factory(self.engine, self.cube)
def test_formula_terms_p0_bounded(self):
coord_a = DimCoord(np.arange(5))
coord_p0 = DimCoord(1, bounds=[0, 2], var_name="p0")
self.cube_parts["coordinates"].extend(
[(coord_a, "a"), (coord_p0, "p0")]
)
self.requires["formula_terms"] = dict(a="a", b="b", ps="ps", p0="p0")
with warnings.catch_warnings(record=True) as warn:
warnings.simplefilter("always")
_load_aux_factory(self.engine, self.cube)
self.assertEqual(len(warn), 1)
msg = (
"Ignoring atmosphere hybrid sigma pressure scalar "
"coordinate {!r} bounds.".format(coord_p0.name())
)
self.assertEqual(msg, str(warn[0].message))
def _check_no_delta(self):
# Check cube.add_aux_coord method.
self.assertEqual(self.cube.add_aux_coord.call_count, 0)
# Check cube.add_aux_factory method.
self.assertEqual(self.cube.add_aux_factory.call_count, 1)
args, _ = self.cube.add_aux_factory.call_args
self.assertEqual(len(args), 1)
factory = args[0]
# Check that the factory has no delta term
self.assertEqual(factory.delta, None)
self.assertEqual(factory.sigma, mock.sentinel.b)
self.assertEqual(factory.surface_air_pressure, self.ps)
def test_formula_terms_ap_missing_coords(self):
self.requires["formula_terms"] = dict(ap="ap", b="b", ps="ps")
with mock.patch("warnings.warn") as warn:
_load_aux_factory(self.engine, self.cube)
warn.assert_called_once_with(
"Unable to find coordinate for variable " "'ap'"
)
self._check_no_delta()
def test_formula_terms_no_delta_terms(self):
self.requires["formula_terms"] = dict(b="b", ps="ps")
_load_aux_factory(self.engine, self.cube)
self._check_no_delta()
def test_formula_terms_no_p0_term(self):
coord_a = DimCoord(np.arange(5), units="Pa")
self.cube_parts["coordinates"].append((coord_a, "a"))
self.requires["formula_terms"] = dict(a="a", b="b", ps="ps")
_load_aux_factory(self.engine, self.cube)
self._check_no_delta()
def test_formula_terms_no_a_term(self):
coord_p0 = DimCoord(10, units="1")
self.cube_parts["coordinates"].append((coord_p0, "p0"))
self.requires["formula_terms"] = dict(a="p0", b="b", ps="ps")
_load_aux_factory(self.engine, self.cube)
self._check_no_delta()
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 107,467,245,724,303,900 | 40.839378 | 78 | 0.615728 | false |
genius1611/horizon | django-openstack/django_openstack/tests/view_tests/auth_tests.py | 1 | 8089 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from django_openstack import api
from django_openstack.tests.view_tests import base
from openstackx.api import exceptions as api_exceptions
from mox import IsA
class AuthViewTests(base.BaseViewTests):
def setUp(self):
super(AuthViewTests, self).setUp()
self.setActiveUser()
self.PASSWORD = 'secret'
def test_login_index(self):
res = self.client.get(reverse('auth_login'))
self.assertTemplateUsed(res, 'splash.html')
def test_login_user_logged_in(self):
self.setActiveUser(self.TEST_TOKEN, self.TEST_USER, self.TEST_TENANT,
False, self.TEST_SERVICE_CATALOG)
res = self.client.get(reverse('auth_login'))
self.assertRedirectsNoFollow(res, reverse('dash_overview'))
def test_login_admin_logged_in(self):
self.setActiveUser(self.TEST_TOKEN, self.TEST_USER, self.TEST_TENANT,
True, self.TEST_SERVICE_CATALOG)
res = self.client.get(reverse('auth_login'))
self.assertRedirectsNoFollow(res, reverse('syspanel_overview'))
def test_login_no_tenants(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
class FakeToken(object):
            id = TOKEN_ID
            user = {'roles': [{'name': 'fake'}]}
            serviceCatalog = {}
aToken = api.Token(FakeToken())
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([])
self.mox.StubOutWithMock(messages, 'error')
messages.error(IsA(http.HttpRequest), IsA(unicode))
self.mox.ReplayAll()
res = self.client.post(reverse('auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
self.mox.VerifyAll()
def test_login(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
class FakeToken(object):
            id = TOKEN_ID
user = {"id": "1",
"roles": [{"id": "1", "name": "fake"}], "name": "user"}
serviceCatalog = {}
tenant = None
aToken = api.Token(FakeToken())
bToken = aToken
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
bToken.tenant = {'id': aTenant.id, 'name': aTenant.name}
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([aTenant])
self.mox.StubOutWithMock(api, 'token_create_scoped')
api.token_create_scoped(IsA(http.HttpRequest), aTenant.id,
aToken.id).AndReturn(bToken)
self.mox.ReplayAll()
res = self.client.post(reverse('auth_login'), form_data)
self.assertRedirectsNoFollow(res, reverse('dash_overview'))
self.mox.VerifyAll()
def test_login_invalid_credentials(self):
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
unauthorized = api_exceptions.Unauthorized('unauth', message='unauth')
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndRaise(unauthorized)
self.mox.ReplayAll()
res = self.client.post(reverse('auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
self.mox.VerifyAll()
def test_login_exception(self):
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
api_exception = api_exceptions.ApiException('apiException',
message='apiException')
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndRaise(api_exception)
self.mox.ReplayAll()
res = self.client.post(reverse('auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
self.mox.VerifyAll()
def test_switch_tenants_index(self):
res = self.client.get(reverse('auth_switch', args=[self.TEST_TENANT]))
self.assertTemplateUsed(res, 'switch_tenants.html')
def test_switch_tenants(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
self.setActiveUser(self.TEST_TOKEN, self.TEST_USER, self.TEST_TENANT,
False, self.TEST_SERVICE_CATALOG)
form_data = {'method': 'LoginWithTenant',
'password': self.PASSWORD,
'tenant': NEW_TENANT_ID,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
aToken = self.mox.CreateMock(api.Token)
aToken.id = TOKEN_ID
aToken.user = {'name': self.TEST_USER, 'roles': [{'name': 'fake'}]}
aToken.serviceCatalog = {}
aToken.tenant = {'id': aTenant.id, 'name': aTenant.name}
api.token_create(IsA(http.HttpRequest), NEW_TENANT_ID, self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([aTenant])
self.mox.ReplayAll()
res = self.client.post(reverse('auth_switch', args=[NEW_TENANT_ID]),
form_data)
self.assertRedirectsNoFollow(res, reverse('dash_overview'))
self.assertEqual(self.client.session['tenant'], NEW_TENANT_NAME)
self.mox.VerifyAll()
def test_logout(self):
KEY = 'arbitraryKeyString'
VALUE = 'arbitraryKeyValue'
self.assertNotIn(KEY, self.client.session)
self.client.session[KEY] = VALUE
res = self.client.get(reverse('auth_logout'))
self.assertRedirectsNoFollow(res, reverse('splash'))
self.assertNotIn(KEY, self.client.session)
| apache-2.0 | 1,688,883,666,343,720,200 | 34.169565 | 78 | 0.598467 | false |
pyvideo/pyvideo | plugins/drop_no_publish.py | 1 | 3730 | import glob
import fnmatch
import json
import os
import pelican
from pelican import signals
# Monkey patch the ArticleGenerator to override specific method.
# We have to override the `_include_path` method because it does not
# filter files with enough specificity. If we wanted to filter a talk
# at `categories/jacksconf/videos/jacks-talk.json`, we could do so
# (without using this patch) by including `jacks-talk.json` in the
# list of IGNORE_FILES. However, if there are multiple `jacks-talk.json`
# files (maybe a bunch of different Jacks wanted to talk at a bunch of
# different conferences), we would exclude all `jacks-talk.json` files by
# including the single `jacks-talk.json` string in IGNORE_FILES. By checking
# the full path passed to `_include_path`, we can filter on a more granular
# level.
# Hopefully this patch can be removed by the merger of this PR:
# https://github.com/getpelican/pelican/pull/1975
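# A concrete illustration of the difference (the path reuses the hypothetical
# example above): with IGNORE_FILES = ['categories/jacksconf/videos/jacks-talk.json']
# the stock basename-only check can never match, while the full-path check
# added below can:
#   fnmatch.fnmatch('categories/jacksconf/videos/jacks-talk.json',
#                   'categories/jacksconf/videos/jacks-talk.json')  # True
#   fnmatch.fnmatch('jacks-talk.json',
#                   'categories/jacksconf/videos/jacks-talk.json')  # False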
class PyTubeArticlesGenerator(pelican.ArticlesGenerator):
def _include_path(self, path, extensions=None):
"""Inclusion logic for .get_files(), returns True/False
:param path: the path which might be including
:param extensions: the list of allowed extensions (if False, all
extensions are allowed)
"""
if extensions is None:
extensions = tuple(self.readers.extensions)
#check IGNORE_FILES
ignores = self.settings['IGNORE_FILES']
if any(fnmatch.fnmatch(path, ignore) for ignore in ignores):
return False
basename = os.path.basename(path)
if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):
return False
if extensions is False or basename.endswith(extensions):
return True
return False
pelican.ArticlesGenerator = PyTubeArticlesGenerator
def drop_no_publish(pelican_proj_obj):
"""
Update IGNORE_FILES in pelicanconf with list of articles
that should be excluded based on their ID. The list of article
IDs that should be dropped is located in NO_PUBLISH_FILE.
"""
excludes = pelican_proj_obj.settings.get('IGNORE_FILES', [])
path = pelican_proj_obj.settings.get('PATH')
data_dir = pelican_proj_obj.settings.get('DATA_DIR')
no_publish_file = pelican_proj_obj.settings.get('NO_PUBLISH_FILE')
if not no_publish_file:
return
no_publish_file_path = os.path.join(path, data_dir, no_publish_file)
no_publish_ids = None
if os.path.exists(no_publish_file_path):
with open(no_publish_file_path, encoding='utf-8') as fp:
no_publish_ids = set(json.load(fp))
if not no_publish_ids:
return
paths = get_no_publish_paths(path, no_publish_ids)
pelican_proj_obj.settings['IGNORE_FILES'] = excludes + paths
def get_no_publish_paths(pelican_path, no_publish_ids):
search_pattern = os.path.join(pelican_path, '**/**/**/*.json')
paths = []
for file_path in glob.iglob(search_pattern):
with open(file_path) as fp:
try:
blob = json.load(fp)
except json.decoder.JSONDecodeError:
print(f'Could not decode {file_path}', flush=True)
continue
if isinstance(blob, dict):
file_id = blob.get('id')
if file_id in no_publish_ids:
paths.append(file_path)
no_publish_paths = []
# strip paths so that all paths start with from inside of pelican PATH dir.
for path in paths:
path = path.replace(pelican_path, '')
path = path.lstrip('/')
no_publish_paths.append(path)
return no_publish_paths
def register():
signals.initialized.connect(drop_no_publish)
| gpl-3.0 | -2,504,210,644,669,340,700 | 34.52381 | 79 | 0.664879 | false |
maoy/zknova | nova/tests/integrated/test_extensions.py | 1 | 1611 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ExtensionsTest(integrated_helpers._IntegratedTestBase):
def _get_flags(self):
f = super(ExtensionsTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.tests.api.openstack.compute.extensions.'
'foxinsocks.Foxinsocks')
return f
def test_get_foxnsocks(self):
# Simple check that fox-n-socks works.
response = self.api.api_request('/foxnsocks')
foxnsocks = response.read()
LOG.debug("foxnsocks: %s" % foxnsocks)
self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
| apache-2.0 | -2,133,619,132,568,090,000 | 38.292683 | 78 | 0.70329 | false |
soarpenguin/ansible | lib/ansible/modules/cloud/cloudstack/cs_configuration.py | 18 | 8413 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_configuration
short_description: Manages configuration on Apache CloudStack based clouds.
description:
- Manages global, zone, account, storage and cluster configurations.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the configuration.
required: true
value:
description:
- Value of the configuration.
required: true
account:
description:
- Ensure the value for corresponding account.
required: false
default: null
domain:
description:
- Domain the account is related to.
- Only considered if C(account) is used.
required: false
default: ROOT
zone:
description:
- Ensure the value for corresponding zone.
required: false
default: null
storage:
description:
- Ensure the value for corresponding storage pool.
required: false
default: null
cluster:
description:
- Ensure the value for corresponding cluster.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure global configuration
- local_action:
module: cs_configuration
name: router.reboot.when.outofband.migrated
value: false
# Ensure zone configuration
- local_action:
module: cs_configuration
name: router.reboot.when.outofband.migrated
zone: ch-gva-01
value: true
# Ensure storage configuration
- local_action:
module: cs_configuration
name: storage.overprovisioning.factor
storage: storage01
value: 2.0
# Ensure account configuration
- local_action:
module: cs_configuration
name: allow.public.user.templates
value: false
account: acme inc
domain: customers
'''
RETURN = '''
---
category:
description: Category of the configuration.
returned: success
type: string
sample: Advanced
scope:
description: Scope (zone/cluster/storagepool/account) of the parameter that needs to be updated.
returned: success
type: string
sample: storagepool
description:
description: Description of the configuration.
returned: success
type: string
sample: Setup the host to do multipath
name:
description: Name of the configuration.
returned: success
type: string
sample: zone.vlan.capacity.notificationthreshold
value:
description: Value of the configuration.
returned: success
type: string
sample: "0.75"
account:
description: Account of the configuration.
returned: success
type: string
sample: admin
domain:
    description: Domain of the account of the configuration.
returned: success
type: string
sample: ROOT
zone:
description: Zone of the configuration.
returned: success
type: string
sample: ch-gva-01
cluster:
description: Cluster of the configuration.
returned: success
type: string
sample: cluster01
storage:
description: Storage of the configuration.
returned: success
type: string
sample: storage01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackConfiguration(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackConfiguration, self).__init__(module)
self.returns = {
'category': 'category',
'scope': 'scope',
'value': 'value',
}
self.storage = None
self.account = None
self.cluster = None
def _get_common_configuration_args(self):
args = {
'name': self.module.params.get('name'),
'accountid': self.get_account(key='id'),
'storageid': self.get_storage(key='id'),
'zoneid': self.get_zone(key='id'),
'clusterid': self.get_cluster(key='id'),
}
return args
def get_zone(self, key=None):
        # make sure we do not use the default zone
zone = self.module.params.get('zone')
if zone:
return super(AnsibleCloudStackConfiguration, self).get_zone(key=key)
def get_cluster(self, key=None):
if not self.cluster:
cluster_name = self.module.params.get('cluster')
if not cluster_name:
return None
args = {
'name': cluster_name,
}
clusters = self.query_api('listClusters', **args)
if clusters:
self.cluster = clusters['cluster'][0]
self.result['cluster'] = self.cluster['name']
else:
self.module.fail_json(msg="Cluster %s not found." % cluster_name)
return self._get_by_key(key=key, my_dict=self.cluster)
def get_storage(self, key=None):
if not self.storage:
storage_pool_name = self.module.params.get('storage')
if not storage_pool_name:
return None
args = {
'name': storage_pool_name,
}
storage_pools = self.query_api('listStoragePools', **args)
if storage_pools:
self.storage = storage_pools['storagepool'][0]
self.result['storage'] = self.storage['name']
else:
self.module.fail_json(msg="Storage pool %s not found." % storage_pool_name)
return self._get_by_key(key=key, my_dict=self.storage)
def get_configuration(self):
configuration = None
args = self._get_common_configuration_args()
configurations = self.query_api('listConfigurations', **args)
if not configurations:
self.module.fail_json(msg="Configuration %s not found." % args['name'])
configuration = configurations['configuration'][0]
return configuration
def get_value(self):
value = str(self.module.params.get('value'))
if value in ('True', 'False'):
value = value.lower()
return value
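    # Illustrative note (not from the original module): a task that passes
    # `value: false` typically reaches this point as the string "False";
    # get_value() lowercases it to "false" so it compares cleanly against the
    # value CloudStack reports back in has_changed().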
def present_configuration(self):
configuration = self.get_configuration()
args = self._get_common_configuration_args()
args['value'] = self.get_value()
if self.has_changed(args, configuration, ['value']):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateConfiguration', **args)
configuration = res['configuration']
return configuration
def get_result(self, configuration):
self.result = super(AnsibleCloudStackConfiguration, self).get_result(configuration)
if self.account:
self.result['account'] = self.account['name']
self.result['domain'] = self.domain['path']
elif self.zone:
self.result['zone'] = self.zone['name']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
value=dict(type='str', required=True),
zone=dict(),
storage=dict(),
cluster=dict(),
account=dict(),
domain=dict(default='ROOT')
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_configuration = AnsibleCloudStackConfiguration(module)
configuration = acs_configuration.present_configuration()
result = acs_configuration.get_result(configuration)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,783,205,968,975,773,000 | 28.409091 | 98 | 0.641065 | false |
SamiHiltunen/invenio-oauth2server | tests/helpers.py | 1 | 4827 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test helper functions."""
from __future__ import absolute_import, print_function
from flask import Blueprint, abort, jsonify, request, session, url_for
from flask_oauthlib.client import OAuth, prepare_request
from flask_security import url_for_security
from mock import MagicMock
from six.moves.urllib.parse import urlparse
from werkzeug.urls import url_decode, url_parse, url_unparse
def patch_request(app):
test_client = app.test_client()
def make_request(uri, headers=None, data=None, method=None):
uri, headers, data, method = prepare_request(
uri, headers, data, method
)
if not headers and data is not None:
headers = {
'Content-Type': ' application/x-www-form-urlencoded'
}
# test client is a `werkzeug.test.Client`
parsed = urlparse(uri)
uri = '%s?%s' % (parsed.path, parsed.query)
resp = test_client.open(
uri, headers=headers, data=data, method=method,
)
# for compatible
resp.code = resp.status_code
return resp, resp.data
return make_request
def parse_redirect(location, parse_fragment=False):
scheme, netloc, script_root, qs, anchor = url_parse(location)
return (
url_unparse((scheme, netloc, script_root, '', '')),
url_decode(anchor if parse_fragment else qs)
)
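# Illustrative result (hypothetical URL): parse_redirect('http://localhost/cb?code=abc&state=x')
# returns ('http://localhost/cb', <MultiDict with 'code': 'abc', 'state': 'x'>).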
def login(test_client, email='[email protected]', password='tester'):
return test_client.post(
url_for_security('login'),
data={
'email': email,
'password': password,
})
def create_oauth_client(app, name, **kwargs):
"""Helper function to create a OAuth2 client to test an OAuth2 provider."""
blueprint = Blueprint('oauth2test', __name__, template_folder='templates')
default = dict(
consumer_key='confidential',
consumer_secret='confidential',
request_token_params={'scope': 'test:scope'},
request_token_url=None,
access_token_method='POST',
access_token_url='/oauth/token',
authorize_url='/oauth/authorize',
content_type='application/json',
)
default.update(kwargs)
oauth = OAuth(app)
remote = oauth.remote_app(name, **default)
@blueprint.route('/oauth2test/login')
def login():
return remote.authorize(callback=url_for('oauth2test.authorized',
_external=True))
@blueprint.route('/oauth2test/logout')
def logout():
session.pop('confidential_token', None)
return "logout"
@blueprint.route('/oauth2test/authorized')
@remote.authorized_handler
def authorized(resp):
if resp is None:
return 'Access denied: error=%s' % (
request.args.get('error', "unknown")
)
if isinstance(resp, dict) and 'access_token' in resp:
session['confidential_token'] = (resp['access_token'], '')
return jsonify(resp)
return str(resp)
def get_test(test_url):
if 'confidential_token' not in session:
abort(403)
else:
ret = remote.get(test_url)
if ret.status != 200:
return abort(ret.status)
return ret.raw_data
@blueprint.route('/oauth2test/test-ping')
def test_ping():
return get_test(url_for("invenio_oauth2server.ping"))
@blueprint.route('/oauth2test/test-info')
def test_info():
return get_test(url_for('invenio_oauth2server.info'))
@blueprint.route('/oauth2test/test-invalid')
def test_invalid():
return get_test(url_for('invenio_oauth2server.invalid'))
@remote.tokengetter
def get_oauth_token():
return session.get('confidential_token')
app.register_blueprint(blueprint)
return remote
| gpl-2.0 | -8,373,578,105,909,043,000 | 32.061644 | 79 | 0.641807 | false |
tmuic/mlat-server | mlat/server/mlattrack.py | 1 | 16064 | # -*- mode: python; indent-tabs-mode: nil -*-
# Part of mlat-server: a Mode S multilateration server
# Copyright (C) 2015 Oliver Jowett <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The multilateration tracker: pairs up copies of the same message seen by more
than one receiver, clusters them by time, and passes them on to the solver to
derive positions.
"""
import json
import asyncio
import logging
import operator
import numpy
from contextlib import closing
import modes.message
from mlat import geodesy, constants, profile
from mlat.server import clocknorm, solver, config
glogger = logging.getLogger("mlattrack")
class MessageGroup:
def __init__(self, message, first_seen):
self.message = message
self.first_seen = first_seen
self.copies = []
self.handle = None
class MlatTracker(object):
def __init__(self, coordinator, blacklist_filename=None, pseudorange_filename=None):
self.pending = {}
self.coordinator = coordinator
self.tracker = coordinator.tracker
self.clock_tracker = coordinator.clock_tracker
self.blacklist_filename = blacklist_filename
self.read_blacklist()
self.coordinator.add_sighup_handler(self.read_blacklist)
self.pseudorange_file = None
self.pseudorange_filename = pseudorange_filename
if self.pseudorange_filename:
self.reopen_pseudoranges()
self.coordinator.add_sighup_handler(self.reopen_pseudoranges)
def read_blacklist(self):
s = set()
if self.blacklist_filename:
try:
                with closing(open(self.blacklist_filename, 'r')) as f:
                    for line in f:
                        user = line.strip()
                        if user:
                            s.add(user)
except FileNotFoundError:
pass
glogger.info("Read {n} blacklist entries".format(n=len(s)))
self.blacklist = s
def reopen_pseudoranges(self):
if self.pseudorange_file:
self.pseudorange_file.close()
self.pseudorange_file = None
self.pseudorange_file = open(self.pseudorange_filename, 'a')
@profile.trackcpu
def receiver_mlat(self, receiver, timestamp, message, utc):
# use message as key
group = self.pending.get(message)
if not group:
group = self.pending[message] = MessageGroup(message, utc)
group.handle = asyncio.get_event_loop().call_later(
config.MLAT_DELAY,
self._resolve,
group)
group.copies.append((receiver, timestamp, utc))
group.first_seen = min(group.first_seen, utc)
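    # Summary comment added for clarity: each distinct Mode S payload gets one
    # pending MessageGroup; every receiver that reports the same payload within
    # config.MLAT_DELAY seconds lands in group.copies, and the whole batch is
    # then handed to _resolve() by the timer armed above.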
@profile.trackcpu
def _resolve(self, group):
del self.pending[group.message]
# less than 3 messages -> no go
if len(group.copies) < 3:
return
decoded = modes.message.decode(group.message)
ac = self.tracker.aircraft.get(decoded.address)
if not ac:
return
ac.mlat_message_count += 1
if not ac.allow_mlat:
glogger.info("not doing mlat for {0:06x}, wrong partition!".format(ac.icao))
return
# When we've seen a few copies of the same message, it's
# probably correct. Update the tracker with newly seen
# altitudes, squawks, callsigns.
if decoded.altitude is not None:
ac.altitude = decoded.altitude
ac.last_altitude_time = group.first_seen
if decoded.squawk is not None:
ac.squawk = decoded.squawk
if decoded.callsign is not None:
ac.callsign = decoded.callsign
# find old result, if present
if ac.last_result_position is None or (group.first_seen - ac.last_result_time) > 120:
last_result_position = None
last_result_var = 1e9
last_result_dof = 0
last_result_time = group.first_seen - 120
else:
last_result_position = ac.last_result_position
last_result_var = ac.last_result_var
last_result_dof = ac.last_result_dof
last_result_time = ac.last_result_time
# find altitude
if ac.altitude is None:
altitude = None
altitude_dof = 0
else:
altitude = ac.altitude * constants.FTOM
altitude_dof = 1
# construct a map of receiver -> list of timestamps
timestamp_map = {}
for receiver, timestamp, utc in group.copies:
if receiver.user not in self.blacklist:
timestamp_map.setdefault(receiver, []).append((timestamp, utc))
# check for minimum needed receivers
dof = len(timestamp_map) + altitude_dof - 4
if dof < 0:
return
# basic ratelimit before we do more work
elapsed = group.first_seen - last_result_time
if elapsed < 15.0 and dof < last_result_dof:
return
if elapsed < 2.0 and dof == last_result_dof:
return
# normalize timestamps. This returns a list of timestamp maps;
# within each map, the timestamp values are comparable to each other.
components = clocknorm.normalize(clocktracker=self.clock_tracker,
timestamp_map=timestamp_map)
# cluster timestamps into clusters that are probably copies of the
# same transmission.
clusters = []
min_component_size = 4 - altitude_dof
for component in components:
if len(component) >= min_component_size: # don't bother with orphan components at all
clusters.extend(_cluster_timestamps(component, min_component_size))
if not clusters:
return
# start from the most recent, largest, cluster
result = None
clusters.sort(key=lambda x: (x[0], x[1]))
while clusters and not result:
distinct, cluster_utc, cluster = clusters.pop()
# accept fewer receivers after 10s
# accept the same number of receivers after MLAT_DELAY - 0.5s
# accept more receivers immediately
elapsed = cluster_utc - last_result_time
dof = distinct + altitude_dof - 4
if elapsed < 10.0 and dof < last_result_dof:
break
if elapsed < (config.MLAT_DELAY - 0.5) and dof == last_result_dof:
break
# assume 250ft accuracy at the time it is reported
# (this bundles up both the measurement error, and
# that we don't adjust for local pressure)
#
# Then degrade the accuracy over time at ~4000fpm
if decoded.altitude is not None:
altitude_error = 250 * constants.FTOM
elif altitude is not None:
altitude_error = (250 + (cluster_utc - ac.last_altitude_time) * 70) * constants.FTOM
else:
altitude_error = None
cluster.sort(key=operator.itemgetter(1)) # sort by increasing timestamp (todo: just assume descending..)
r = solver.solve(cluster, altitude, altitude_error,
last_result_position if last_result_position else cluster[0][0].position)
if r:
# estimate the error
ecef, ecef_cov = r
if ecef_cov is not None:
var_est = numpy.trace(ecef_cov)
else:
# this result is suspect
var_est = 100e6
if var_est > 100e6:
# more than 10km, too inaccurate
continue
if elapsed < 2.0 and var_est > last_result_var * 1.1:
# less accurate than a recent position
continue
#if elapsed < 10.0 and var_est > last_result_var * 2.25:
# # much less accurate than a recent-ish position
# continue
# accept it
result = r
if not result:
return
ecef, ecef_cov = result
ac.last_result_position = ecef
ac.last_result_var = var_est
ac.last_result_dof = dof
ac.last_result_time = cluster_utc
ac.mlat_result_count += 1
if ac.kalman.update(cluster_utc, cluster, altitude, altitude_error, ecef, ecef_cov, distinct, dof):
ac.mlat_kalman_count += 1
if altitude is None:
_, _, solved_alt = geodesy.ecef2llh(ecef)
glogger.info("{addr:06x} solved altitude={solved_alt:.0f}ft with dof={dof}".format(
addr=decoded.address,
solved_alt=solved_alt*constants.MTOF,
dof=dof))
for handler in self.coordinator.output_handlers:
handler(cluster_utc, decoded.address,
ecef, ecef_cov,
[receiver for receiver, timestamp, error in cluster], distinct, dof,
ac.kalman)
if self.pseudorange_file:
cluster_state = []
t0 = cluster[0][1]
for receiver, timestamp, variance in cluster:
cluster_state.append([round(receiver.position[0], 0),
round(receiver.position[1], 0),
round(receiver.position[2], 0),
round((timestamp-t0)*1e6, 1),
round(variance*1e12, 2)])
state = {'icao': '{a:06x}'.format(a=decoded.address),
'time': round(cluster_utc, 3),
'ecef': [round(ecef[0], 0),
round(ecef[1], 0),
round(ecef[2], 0)],
'distinct': distinct,
'dof': dof,
'cluster': cluster_state}
if ecef_cov is not None:
state['ecef_cov'] = [round(ecef_cov[0, 0], 0),
round(ecef_cov[0, 1], 0),
round(ecef_cov[0, 2], 0),
round(ecef_cov[1, 0], 0),
round(ecef_cov[1, 1], 0),
round(ecef_cov[1, 2], 0),
round(ecef_cov[2, 0], 0),
round(ecef_cov[2, 1], 0),
round(ecef_cov[2, 2], 0)]
if altitude is not None:
state['altitude'] = round(altitude, 0)
state['altitude_error'] = round(altitude_error, 0)
json.dump(state, self.pseudorange_file)
self.pseudorange_file.write('\n')
@profile.trackcpu
def _cluster_timestamps(component, min_receivers):
"""Given a component that has normalized timestamps:
{
receiver: (variance, [(timestamp, utc), ...]), ...
receiver: (variance, [(timestamp, utc), ...]), ...
}, ...
return a list of clusters, where each cluster is a tuple:
    (distinct, first_seen, [(receiver, timestamp, variance), ...])
with distinct as the number of distinct receivers;
first_seen as the first UTC time seen in the cluster
"""
#glogger.info("cluster these:")
# flatten the component into a list of tuples
flat_component = []
for receiver, (variance, timestamps) in component.items():
for timestamp, utc in timestamps:
#glogger.info(" {r} {t:.1f}us {e:.1f}us".format(r=receiver.user, t=timestamp*1e6, e=error*1e6))
flat_component.append((receiver, timestamp, variance, utc))
# sort by timestamp
flat_component.sort(key=operator.itemgetter(1))
# do a rough clustering: groups of items with inter-item spacing of less than 2ms
group = [flat_component[0]]
groups = [group]
for t in flat_component[1:]:
if (t[1] - group[-1][1]) > 2e-3:
group = [t]
groups.append(group)
else:
group.append(t)
# inspect each group and produce clusters
# this is about O(n^2)-ish with group size, which
# is why we try to break up the component into
# smaller groups first.
#glogger.info("{n} groups".format(n=len(groups)))
clusters = []
for group in groups:
#glogger.info(" group:")
#for r, t, e in group:
# glogger.info(" {r} {t:.1f}us {e:.1f}us".format(r=r.user, t=t*1e6, e=e*1e6))
while len(group) >= min_receivers:
receiver, timestamp, variance, utc = group.pop()
cluster = [(receiver, timestamp, variance)]
last_timestamp = timestamp
distinct_receivers = 1
first_seen = utc
#glogger.info("forming cluster from group:")
#glogger.info(" 0 = {r} {t:.1f}us".format(r=head[0].user, t=head[1]*1e6))
for i in range(len(group) - 1, -1, -1):
receiver, timestamp, variance, utc = group[i]
#glogger.info(" consider {i} = {r} {t:.1f}us".format(i=i, r=receiver.user, t=timestamp*1e6))
if (last_timestamp - timestamp) > 2e-3:
# Can't possibly be part of the same cluster.
#
# Note that this is a different test to the rough grouping above:
# that looks at the interval betwen _consecutive_ items, so a
# group might span a lot more than 2ms!
#glogger.info(" discard: >2ms out")
break
# strict test for range, now.
is_distinct = can_cluster = True
for other_receiver, other_timestamp, other_variance in cluster:
if other_receiver is receiver:
#glogger.info(" discard: duplicate receiver")
can_cluster = False
break
d = receiver.distance[other_receiver]
if abs(other_timestamp - timestamp) > (d * 1.05 + 1e3) / constants.Cair:
#glogger.info(" discard: delta {dt:.1f}us > max {m:.1f}us for range {d:.1f}m".format(
# dt=abs(other_timestamp - timestamp)*1e6,
# m=(d * 1.05 + 1e3) / constants.Cair*1e6,
# d=d))
can_cluster = False
break
if d < 1e3:
# if receivers are closer than 1km, then
# only count them as one receiver for the 3-receiver
# requirement
#glogger.info(" not distinct vs receiver {r}".format(r=other_receiver.user))
is_distinct = False
if can_cluster:
#glogger.info(" accept")
cluster.append((receiver, timestamp, variance))
first_seen = min(first_seen, utc)
del group[i]
if is_distinct:
distinct_receivers += 1
if distinct_receivers >= min_receivers:
cluster.reverse() # make it ascending timestamps again
clusters.append((distinct_receivers, first_seen, cluster))
return clusters
| agpl-3.0 | -284,360,831,872,270,050 | 37.338902 | 117 | 0.547186 | false |
ngageoint/gamification-server | gamification/events/models.py | 1 | 4738 | # -*- coding: utf-8 -*-
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, as long as
# any reuse or further development of the software attributes the
# National Geospatial-Intelligence Agency (NGA) authorship as follows:
# 'This software (gamification-server)
# is provided to the public as a courtesy of the National
# Geospatial-Intelligence Agency.
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import datetime
from django.contrib.auth.models import User
from gamification.core.models import Project, ProjectBadge
from django.db import models
class Event(models.Model):
"""
An Event is an action reported by an external system
"""
user = models.ForeignKey(User)
project = models.ForeignKey(Project)
event_dtg = models.DateTimeField('event date')
details = models.TextField(editable=False)
def __init__(self, *args, **kw):
# dictionary for the details of the event
self.details_map = {}
super(Event, self).__init__(*args, **kw)
if self.details:
try:
self.details_map = json.loads(self.details)
except TypeError:
self.details_map = self.details
def save(self, *args, **kw):
self.details = json.dumps(self.details_map)
super(Event, self).save(*args, **kw)
@property
def dtg(self):
return self.event_dtg
@property
def details_map(self):
return self._details_map
@details_map.setter
def details_map(self, details_map):
self._details_map = details_map
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state
@property
def is_current_event(self):
return self._current_event
@property
def is_today(self):
        return datetime.date.today() == self.event_dtg.date()
@state.setter
def current_event(self, current_event_id):
self._current_event = (self.id == current_event_id)
# Adds specified event data to the state
def update_state(self, outer_key, inner_key, inner_value):
try:
if not outer_key in self._state.event_data:
self._state._event_data[outer_key] = {}
self._state._event_data[outer_key][inner_key] = inner_value
except AttributeError:
print 'AttributeError'
pass
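# Illustrative use of the details JSON round-trip (field values are hypothetical):
#   e = Event(user=some_user, project=some_project, event_dtg=now)
#   e.details_map = {'action': 'upload', 'count': 3}
#   e.save()                                # serialized into Event.details
#   Event.objects.get(pk=e.pk).details_map  # -> {'action': 'upload', 'count': 3}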
class Policy(models.Model):
"""
    A Policy is a condition-action specifier for the rules engine. It will include one or more Rules.
"""
STATE_POLICY = 0
AWARD_POLICY = 1
POLICY_CHOICES = ( (STATE_POLICY, 'State Policy'), (AWARD_POLICY, 'Award Policy'), )
project = models.ForeignKey(Project)
projectbadge = models.ForeignKey(ProjectBadge)
type = models.IntegerField(choices=POLICY_CHOICES)
rule = models.TextField()
def __unicode__(self):
try:
kind = self.POLICY_CHOICES[self.projectbadge.type][1]
except:
kind = 'Policy'
return u"%s for %s on %s" % (kind, self.projectbadge.name, self.project.name)
#class Rule(models.Model):
# """
# A Rule is a decision specifier that will be the basis for a Policy
# """
#
# name = models.CharField(max_length=100)
# policy = models.ForeignKey(Policy)
# badge = models.ForeignKey(ProjectBadge)
# conditions = models.TextField(editable=False)
#
# def __init__(self, *args, **kw):
# # dictionary for the details of the event
# self.conditions_list = []
# super(Event, self).__init__(*args, **kw)
# if self.conditions:
# self.conditions_list = json.loads(self.conditions)
#
# def save(self, *args, **kw):
# self.conditions = json.dumps(self.conditions_list)
# super(Event, self).save(*args, **kw) | mit | -5,676,945,474,821,291,000 | 33.093525 | 97 | 0.658084 | false |
hb9cwp/wireshark-jdsu | tools/netscreen2dump.py | 3 | 4427 | #!/usr/bin/env python
"""
Converts netscreen snoop hex-dumps to a hex-dump that text2pcap can read.
$Id$
Copyright (c) 2004 by Gilbert Ramirez <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
import sys
import re
import os
import stat
import time
class OutputFile:
TIMER_MAX = 99999.9
def __init__(self, name, base_time):
try:
self.fh = open(name, "w")
except IOError, err:
sys.exit(err)
self.base_time = base_time
self.prev_timestamp = 0.0
def PrintPacket(self, timestamp, datalines):
        # What to do with the timestamp? I need more data about what
# the netscreen timestamp is, then I can generate one for the text file.
# print "TS:", timestamp.group("time")
try:
timestamp = float(timestamp.group("time"))
except ValueError:
sys.exit("Unable to convert '%s' to floating point." % \
(timestamp,))
        # Did we wrap around the timer max?
if timestamp < self.prev_timestamp:
self.base_time += self.TIMER_MAX
self.prev_timestamp = timestamp
packet_timestamp = self.base_time + timestamp
# Determine the time string to print
gmtime = time.gmtime(packet_timestamp)
subsecs = packet_timestamp - int(packet_timestamp)
        assert subsecs >= 0
subsecs = int(subsecs * 10)
print >> self.fh, "%s.%d" % (time.strftime("%Y-%m-%d %H:%M:%S", gmtime), \
subsecs)
# Print the packet data
offset = 0
for lineno, hexgroup in datalines:
hexline = hexgroup.group("hex")
hexpairs = hexline.split()
print >> self.fh, "%08x %s" % (offset, hexline)
offset += len(hexpairs)
# Blank line
print >> self.fh
# Find a timestamp line
re_timestamp = re.compile(r"^(?P<time>\d+\.\d): [\w/]+\((?P<io>.)\)(:| len=)")
# Find a hex dump line
re_hex_line = re.compile(r"(?P<hex>([0-9a-f]{2} ){1,16})\s+(?P<ascii>.){1,16}")
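# Made-up sample lines of the kind these expressions are meant to match (the
# exact snoop output format is an assumption, not taken from this script):
#   "6708.7: ethernet1(i) len=98:0010db12f345->0010db98a7b2/0800"    <- re_timestamp
#   "00 10 db 12 f3 45 00 10 db 98 a7 b2 08 00 45 00     E.."       <- re_hex_line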
def run(input_filename, output_filename):
try:
ifh = open(input_filename, "r")
except IOError, err:
sys.exit(err)
# Get the file's creation time.
try:
ctime = os.stat(input_filename)[stat.ST_CTIME]
except OSError, err:
sys.exit(err)
output_file = OutputFile(output_filename, ctime)
timestamp = None
datalines = []
lineno = 0
for line in ifh.xreadlines():
lineno += 1
# If we have no timestamp yet, look for one
if not timestamp:
m = re_timestamp.search(line)
if m:
timestamp = m
# Otherwise, look for hex dump lines
else:
m = re_hex_line.search(line)
if m:
datalines.append((lineno, m))
else:
# If we have been gathering hex dump lines,
# and this line is not a hex dump line, then the hex dump
# has finished, and so has the packet. So print the packet
# and reset our variables so we can look for the next packet.
if datalines:
output_file.PrintPacket(timestamp, datalines)
timestamp = None
datalines = []
# At the end of the file we may still have hex dump data in memory.
# If so, print the packet
if datalines:
output_file.PrintPacket(timestamp, datalines)
timestamp = None
datalines = []
def usage():
print >> sys.stderr, "Usage: netscreen2dump.py netscreen-dump-file new-dump-file"
sys.exit(1)
def main():
if len(sys.argv) != 3:
usage()
run(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
| gpl-2.0 | 7,040,647,125,596,598,000 | 29.321918 | 85 | 0.595437 | false |
liavkoren/djangoDev | django/contrib/admin/views/main.py | 13 | 16170 | from collections import OrderedDict
import sys
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR, TO_FIELD_VAR
from django.contrib.admin.utils import (quote, get_fields_from_path,
lookup_needs_distinct, prepare_lookup_value)
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')
class ChangeList(object):
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_queryset = model_admin.get_queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
self.preserved_filters = model_admin.get_preserved_filters(request)
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
self.get_results(request)
if self.is_popup:
title = ugettext('Select %s')
else:
title = ugettext('Select %s to change')
self.title = title % force_text(self.opts.verbose_name)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.model_admin.lookup_allowed(key, value):
raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params,
self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
spec = field_list_filter_class(field, request, lookup_params,
self.model, self.model_admin, field_path=field_path)
# Check if we need to use distinct()
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts, key))
return filter_specs, bool(filter_specs), lookup_params, use_distinct
except FieldDoesNotExist as e:
six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
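        # Illustrative example (not in the original source): with
        # self.params == {'o': '2.-1', 'q': 'foo'},
        # get_query_string({'p': 1}, ['q']) returns '?o=2.-1&p=1'.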
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization:
# full_result_count is equal to paginator.count if no filters
# were applied
if self.get_filters_params() or self.params.get(SEARCH_VAR):
full_result_count = self.root_queryset.count()
else:
full_result_count = result_count
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.queryset._clone()
else:
try:
result_list = paginator.page(self.page_num + 1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request)
or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
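            # Illustrative example (not in the original source): ?o=1.-2
            # yields ['1', '-2'], i.e. sort by list_display[1] ascending,
            # then by list_display[2] descending.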
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
# reverse order if order_field has already "-" as prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.lookup_opts.pk.name
if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_queryset
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not in the correct type, so we might get FieldError,
# ValueError, ValidationError, or ?.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.model_admin.get_search_results(
request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(field.rel, models.ManyToOneRel):
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
| bsd-3-clause | -6,431,879,351,162,525,000 | 41.552632 | 99 | 0.585096 | false |
lud4ik/txAWS | txaws/s3/client.py | 1 | 20360 | # Copyright (C) 2008 Tristan Seligmann <[email protected]>
# Copyright (C) 2009 Canonical Ltd
# Copyright (C) 2009 Duncan McGreggor <[email protected]>
# Copyright (C) 2012 New Dream Network (DreamHost)
# Licenced under the txaws licence available at /LICENSE in the txaws source.
"""
Client wrapper for Amazon's Simple Storage Service.
API stability: unstable.
Various API-incompatible changes are planned in order to expose missing
functionality in this wrapper.
"""
import mimetypes
from twisted.web.http import datetimeToString
from dateutil.parser import parse as parseTime
from txaws.client.base import BaseClient, BaseQuery, error_wrapper
from txaws.s3.acls import AccessControlPolicy
from txaws.s3.model import (
Bucket, BucketItem, BucketListing, ItemOwner, LifecycleConfiguration,
LifecycleConfigurationRule, NotificationConfiguration, RequestPayment,
VersioningConfiguration, WebsiteConfiguration)
from txaws.s3.exception import S3Error
from txaws.service import AWSServiceEndpoint, S3_ENDPOINT
from txaws.util import XML, calculate_md5
def s3_error_wrapper(error):
error_wrapper(error, S3Error)
class URLContext(object):
"""
The hosts and the paths that form an S3 endpoint change depending upon the
context in which they are called. While S3 supports bucket names in the
host name, we use the convention of providing it in the path so that
using IP addresses and alternative implementations of S3 actually works
(e.g. Walrus).
"""
def __init__(self, service_endpoint, bucket="", object_name=""):
self.endpoint = service_endpoint
self.bucket = bucket
self.object_name = object_name
def get_host(self):
return self.endpoint.get_host()
def get_path(self):
path = "/"
if self.bucket is not None:
path += self.bucket
if self.bucket is not None and self.object_name:
if not self.object_name.startswith("/"):
path += "/"
path += self.object_name
elif self.bucket is not None and not path.endswith("/"):
path += "/"
return path
def get_url(self):
if self.endpoint.port is not None:
return "%s://%s:%d%s" % (
self.endpoint.scheme, self.get_host(), self.endpoint.port,
self.get_path())
else:
return "%s://%s%s" % (
self.endpoint.scheme, self.get_host(), self.get_path())
class S3Client(BaseClient):
"""A client for S3."""
def __init__(self, creds=None, endpoint=None, query_factory=None):
if query_factory is None:
query_factory = Query
super(S3Client, self).__init__(creds, endpoint, query_factory)
def list_buckets(self):
"""
List all buckets.
Returns a list of all the buckets owned by the authenticated sender of
the request.
"""
query = self.query_factory(
action="GET", creds=self.creds, endpoint=self.endpoint)
d = query.submit()
return d.addCallback(self._parse_list_buckets)
def _parse_list_buckets(self, xml_bytes):
"""
Parse XML bucket list response.
"""
root = XML(xml_bytes)
buckets = []
for bucket_data in root.find("Buckets"):
name = bucket_data.findtext("Name")
date_text = bucket_data.findtext("CreationDate")
date_time = parseTime(date_text)
bucket = Bucket(name, date_time)
buckets.append(bucket)
return buckets
def create_bucket(self, bucket):
"""
Create a new bucket.
"""
query = self.query_factory(
action="PUT", creds=self.creds, endpoint=self.endpoint,
bucket=bucket)
return query.submit()
def delete_bucket(self, bucket):
"""
Delete a bucket.
The bucket must be empty before it can be deleted.
"""
query = self.query_factory(
action="DELETE", creds=self.creds, endpoint=self.endpoint,
bucket=bucket)
return query.submit()
def get_bucket(self, bucket):
"""
Get a list of all the objects in a bucket.
"""
query = self.query_factory(
action="GET", creds=self.creds, endpoint=self.endpoint,
bucket=bucket)
d = query.submit()
return d.addCallback(self._parse_get_bucket)
def _parse_get_bucket(self, xml_bytes):
root = XML(xml_bytes)
name = root.findtext("Name")
prefix = root.findtext("Prefix")
marker = root.findtext("Marker")
max_keys = root.findtext("MaxKeys")
is_truncated = root.findtext("IsTruncated")
contents = []
for content_data in root.findall("Contents"):
key = content_data.findtext("Key")
date_text = content_data.findtext("LastModified")
modification_date = parseTime(date_text)
etag = content_data.findtext("ETag")
size = content_data.findtext("Size")
storage_class = content_data.findtext("StorageClass")
owner_id = content_data.findtext("Owner/ID")
owner_display_name = content_data.findtext("Owner/DisplayName")
owner = ItemOwner(owner_id, owner_display_name)
content_item = BucketItem(key, modification_date, etag, size,
storage_class, owner)
contents.append(content_item)
common_prefixes = []
for prefix_data in root.findall("CommonPrefixes"):
common_prefixes.append(prefix_data.text)
return BucketListing(name, prefix, marker, max_keys, is_truncated,
contents, common_prefixes)
def get_bucket_location(self, bucket):
"""
Get the location (region) of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's region.
"""
query = self.query_factory(action="GET", creds=self.creds,
endpoint=self.endpoint, bucket=bucket,
object_name="?location")
d = query.submit()
return d.addCallback(self._parse_bucket_location)
def _parse_bucket_location(self, xml_bytes):
"""Parse a C{LocationConstraint} XML document."""
root = XML(xml_bytes)
return root.text or ""
def get_bucket_lifecycle(self, bucket):
"""
Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration.
"""
query = self.query_factory(
action='GET', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='?lifecycle')
return query.submit().addCallback(self._parse_lifecycle_config)
def _parse_lifecycle_config(self, xml_bytes):
"""Parse a C{LifecycleConfiguration} XML document."""
root = XML(xml_bytes)
rules = []
for content_data in root.findall("Rule"):
id = content_data.findtext("ID")
prefix = content_data.findtext("Prefix")
status = content_data.findtext("Status")
expiration = int(content_data.findtext("Expiration/Days"))
rules.append(
LifecycleConfigurationRule(id, prefix, status, expiration))
return LifecycleConfiguration(rules)
def get_bucket_website_config(self, bucket):
"""
Get the website configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's website
configuration.
"""
query = self.query_factory(
action='GET', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='?website')
return query.submit().addCallback(self._parse_website_config)
def _parse_website_config(self, xml_bytes):
"""Parse a C{WebsiteConfiguration} XML document."""
root = XML(xml_bytes)
index_suffix = root.findtext("IndexDocument/Suffix")
error_key = root.findtext("ErrorDocument/Key")
return WebsiteConfiguration(index_suffix, error_key)
def get_bucket_notification_config(self, bucket):
"""
Get the notification configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will request the bucket's notification
configuration.
"""
query = self.query_factory(
action='GET', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='?notification')
return query.submit().addCallback(self._parse_notification_config)
def _parse_notification_config(self, xml_bytes):
"""Parse a C{NotificationConfiguration} XML document."""
root = XML(xml_bytes)
topic = root.findtext("TopicConfiguration/Topic")
event = root.findtext("TopicConfiguration/Event")
return NotificationConfiguration(topic, event)
def get_bucket_versioning_config(self, bucket):
"""
Get the versioning configuration of a bucket.
        @param bucket: The name of the bucket.
        @return: A C{Deferred} that will request the bucket's versioning
        configuration.
"""
query = self.query_factory(
action='GET', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='?versioning')
return query.submit().addCallback(self._parse_versioning_config)
def _parse_versioning_config(self, xml_bytes):
"""Parse a C{VersioningConfiguration} XML document."""
root = XML(xml_bytes)
mfa_delete = root.findtext("MfaDelete")
status = root.findtext("Status")
return VersioningConfiguration(mfa_delete=mfa_delete, status=status)
def get_bucket_acl(self, bucket):
"""
Get the access control policy for a bucket.
"""
query = self.query_factory(
action='GET', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='?acl')
return query.submit().addCallback(self._parse_acl)
def put_bucket_acl(self, bucket, access_control_policy):
"""
Set access control policy on a bucket.
"""
data = access_control_policy.to_xml()
query = self.query_factory(
action='PUT', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='?acl', data=data)
return query.submit().addCallback(self._parse_acl)
def _parse_acl(self, xml_bytes):
"""
Parse an C{AccessControlPolicy} XML document and convert it into an
L{AccessControlPolicy} instance.
"""
return AccessControlPolicy.from_xml(xml_bytes)
def put_object(self, bucket, object_name, data, content_type=None,
metadata={}, amz_headers={}):
"""
Put an object in a bucket.
An existing object with the same name will be replaced.
@param bucket: The name of the bucket.
@param object: The name of the object.
@param data: The data to write.
@param content_type: The type of data being written.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
        @return: A C{Deferred} that will fire with the result of the request.
"""
query = self.query_factory(
action="PUT", creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name=object_name, data=data,
content_type=content_type, metadata=metadata,
amz_headers=amz_headers)
return query.submit()
def copy_object(self, source_bucket, source_object_name, dest_bucket=None,
dest_object_name=None, metadata={}, amz_headers={}):
"""
Copy an object stored in S3 from a source bucket to a destination
bucket.
@param source_bucket: The S3 bucket to copy the object from.
@param source_object_name: The name of the object to copy.
@param dest_bucket: Optionally, the S3 bucket to copy the object to.
Defaults to C{source_bucket}.
@param dest_object_name: Optionally, the name of the new object.
Defaults to C{source_object_name}.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
        @return: A C{Deferred} that will fire with the result of the request.
"""
dest_bucket = dest_bucket or source_bucket
dest_object_name = dest_object_name or source_object_name
amz_headers["copy-source"] = "/%s/%s" % (source_bucket,
source_object_name)
query = self.query_factory(
action="PUT", creds=self.creds, endpoint=self.endpoint,
bucket=dest_bucket, object_name=dest_object_name,
metadata=metadata, amz_headers=amz_headers)
return query.submit()
def get_object(self, bucket, object_name):
"""
Get an object from a bucket.
"""
query = self.query_factory(
action="GET", creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name=object_name)
return query.submit()
def head_object(self, bucket, object_name):
"""
Retrieve object metadata only.
"""
query = self.query_factory(
action="HEAD", creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name=object_name)
d = query.submit()
return d.addCallback(query.get_response_headers)
def delete_object(self, bucket, object_name):
"""
Delete an object from a bucket.
Once deleted, there is no method to restore or undelete an object.
"""
query = self.query_factory(
action="DELETE", creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name=object_name)
return query.submit()
def put_object_acl(self, bucket, object_name, access_control_policy):
"""
Set access control policy on an object.
"""
data = access_control_policy.to_xml()
query = self.query_factory(
action='PUT', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='%s?acl' % object_name, data=data)
return query.submit().addCallback(self._parse_acl)
def get_object_acl(self, bucket, object_name):
"""
Get the access control policy for an object.
"""
query = self.query_factory(
action='GET', creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name='%s?acl' % object_name)
return query.submit().addCallback(self._parse_acl)
def put_request_payment(self, bucket, payer):
"""
Set request payment configuration on bucket to payer.
@param bucket: The name of the bucket.
@param payer: The name of the payer.
@return: A C{Deferred} that will fire with the result of the request.
"""
data = RequestPayment(payer).to_xml()
query = self.query_factory(
action="PUT", creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name="?requestPayment", data=data)
return query.submit()
def get_request_payment(self, bucket):
"""
Get the request payment configuration on a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the name of the payer.
"""
query = self.query_factory(
action="GET", creds=self.creds, endpoint=self.endpoint,
bucket=bucket, object_name="?requestPayment")
return query.submit().addCallback(self._parse_get_request_payment)
def _parse_get_request_payment(self, xml_bytes):
"""
Parse a C{RequestPaymentConfiguration} XML document and extract the
payer.
"""
return RequestPayment.from_xml(xml_bytes).payer
class Query(BaseQuery):
"""A query for submission to the S3 service."""
def __init__(self, bucket=None, object_name=None, data="",
content_type=None, metadata={}, amz_headers={}, *args,
**kwargs):
super(Query, self).__init__(*args, **kwargs)
self.bucket = bucket
self.object_name = object_name
self.data = data
self.content_type = content_type
self.metadata = metadata
self.amz_headers = amz_headers
self.date = datetimeToString()
if not self.endpoint or not self.endpoint.host:
self.endpoint = AWSServiceEndpoint(S3_ENDPOINT)
self.endpoint.set_method(self.action)
def set_content_type(self):
"""
Set the content type based on the file extension used in the object
name.
"""
if self.object_name and not self.content_type:
# XXX nothing is currently done with the encoding... we may
# need to in the future
self.content_type, encoding = mimetypes.guess_type(
self.object_name, strict=False)
def get_headers(self):
"""
Build the list of headers needed in order to perform S3 operations.
"""
headers = {"Content-Length": len(self.data),
"Content-MD5": calculate_md5(self.data),
"Date": self.date}
for key, value in self.metadata.iteritems():
headers["x-amz-meta-" + key] = value
for key, value in self.amz_headers.iteritems():
headers["x-amz-" + key] = value
# Before we check if the content type is set, let's see if we can set
        # it by guessing the mimetype.
self.set_content_type()
if self.content_type is not None:
headers["Content-Type"] = self.content_type
if self.creds is not None:
signature = self.sign(headers)
headers["Authorization"] = "AWS %s:%s" % (
self.creds.access_key, signature)
return headers
def get_canonicalized_amz_headers(self, headers):
"""
Get the headers defined by Amazon S3.
"""
headers = [
(name.lower(), value) for name, value in headers.iteritems()
if name.lower().startswith("x-amz-")]
headers.sort()
# XXX missing spec implementation:
# 1) txAWS doesn't currently combine headers with the same name
# 2) txAWS doesn't currently unfold long headers
return "".join("%s:%s\n" % (name, value) for name, value in headers)
def get_canonicalized_resource(self):
"""
Get an S3 resource path.
"""
path = "/"
if self.bucket is not None:
path += self.bucket
if self.bucket is not None and self.object_name:
if not self.object_name.startswith("/"):
path += "/"
path += self.object_name
elif self.bucket is not None and not path.endswith("/"):
path += "/"
return path
def sign(self, headers):
"""Sign this query using its built in credentials."""
text = (self.action + "\n" +
headers.get("Content-MD5", "") + "\n" +
headers.get("Content-Type", "") + "\n" +
headers.get("Date", "") + "\n" +
self.get_canonicalized_amz_headers(headers) +
self.get_canonicalized_resource())
return self.creds.sign(text, hash_type="sha1")
def submit(self, url_context=None):
"""Submit this query.
@return: A deferred from get_page
"""
if not url_context:
url_context = URLContext(
self.endpoint, self.bucket, self.object_name)
d = self.get_page(
url_context.get_url(), method=self.action, postdata=self.data,
headers=self.get_headers())
return d.addErrback(s3_error_wrapper)
| mit | 3,304,984,709,359,625,700 | 37.198874 | 78 | 0.602358 | false |
alexandrucoman/bcbio-nextgen-vm | bcbiovm/provider/aws/vpc.py | 1 | 5166 | """
Create a VPC and associated resources for running bcbio on AWS.
"""
import re
import boto.ec2
from bcbiovm import log as logging
from bcbiovm.common import cluster as clusterops
from bcbiovm.common import constant
LOG = logging.get_logger(__name__)
class VirtualPrivateCloud(object):
"""Create and setup the Virtual Private Cloud."""
_GATEWAY_TAG = "%(cluster)s_gw"
_SUBNET_TAG = "%(cluster)s_cluster"
_CLUSTER_SG = "%(cluster)s_cluster_sg"
_RTABLE_TAG = "%(cluster)s_rtable"
def __init__(self, cluster, config, network, recreate):
"""
:param config: elasticluster config file
:param cluster: cluster name
:param network: network to use for the VPC, in CIDR
notation (a.b.c.d/e)
:param recreate: whether to recreate the VPC if exists
"""
self._cluster_name = cluster
self._network = network
self._recreate = recreate
ecluster = clusterops.ElastiCluster(constant.PROVIDER.AWS)
ecluster.load_config(config)
cluster_config = ecluster.get_config(cluster)
self._key_id = cluster_config['cloud']['ec2_access_key']
self._access_key = cluster_config['cloud']['ec2_secret_key']
self._check_network()
def _check_network(self):
"""Check if the received network is valid."""
cidr_regex = r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}$'
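        # Illustrative values (not from the original source): '10.0.0.0/16'
        # passes both checks, while '10.0.0.0' (no mask) or '10.0.0.0/24'
        # (smaller than a /23) are rejected below.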
if not re.search(cidr_regex, self._network):
raise ValueError(
'Network %(network)s is not in CIDR (a.b.c.d/e) format.' %
{"network": self._network})
netmask = int(self._network.split('/')[1])
if netmask > 23:
raise ValueError('Network must be at least a /23 in size.')
def _recreate_vpc(self, connection):
"""Remove and recreate the VPC, destroying all AWS resources
contained in it.
"""
existing_vpcs = connection.get_all_vpcs(
filters={'tag:Name': self._cluster_name})
if not existing_vpcs:
return
raise NotImplementedError(
"bcbio does not currently remove VPCs. "
"The easiest way is to do this manually in the console: "
"https://console.aws.amazon.com/vpc/home")
# FIXME(chapmanb): This doesn't automatically remove resources
# in the VPC like the AWS management console does.
# connection.delete_vpc(existing_vpcs[0].id)
def _create_security_group(self, connection, vpc):
"""Create the security group for bcbio."""
name = ('%(cluster)s_cluster_security_group' %
{"cluster": self._cluster_name})
security_group = connection.create_security_group(
name, 'bcbio cluster nodes', vpc.id)
security_group.authorize(ip_protocol='tcp', from_port=22, to_port=22,
cidr_ip='0.0.0.0/0')
security_group.authorize(ip_protocol='-1', src_group=security_group)
def _create_network(self, connection, vpc):
"""Create and setup the network for the VPC."""
gw_tag = "%(cluster)s_gw" % {"cluster": self._cluster_name}
rt_tag = "%(cluster)s_rtable" % {"cluster": self._cluster_name}
subnet_tag = "%(cluster)s_cluster" % {"cluster": self._cluster_name}
compute_subnet = '%(net)s/24' % {"net": self._network.split('/')[0]}
internet_gateway = connection.create_internet_gateway()
internet_gateway.add_tag('Name', gw_tag)
connection.attach_internet_gateway(internet_gateway.id, vpc.id)
route_table = connection.create_route_table(vpc.id)
route_table.add_tag('Name', rt_tag)
connection.create_route(route_table.id, '0.0.0.0/0',
internet_gateway.id)
subnet = connection.create_subnet(vpc.id, compute_subnet)
subnet.add_tag('Name', subnet_tag)
connection.associate_route_table(route_table.id, subnet.id)
def _setup_placement_group(self, connection):
"""Setup the placement group for the current VPC."""
name = "%(cluster)s_cluster_pg" % {"cluster": self._cluster_name}
placement_groups = connection.get_all_placement_groups()
if name in [pgroup.name for pgroup in placement_groups]:
LOG.info("Refreshing placement group %(name)s.", {"name": name})
connection.delete_placement_group(name)
connection.create_placement_group(name)
LOG.info("Placement group %(name)s created.", {"name": name})
def run(self):
"""Create and setup the Virtual Private Cloud."""
connection = boto.connect_vpc(
aws_access_key_id=self._key_id,
aws_secret_access_key=self._access_key)
if self._recreate:
vpc = self._recreate_vpc(connection)
else:
vpc = connection.create_vpc(self._network)
vpc.add_tag('Name', self._cluster_name)
self._create_security_group(connection, vpc)
self._create_network(connection, vpc)
self._setup_placement_group(connection)
| mit | -8,731,287,913,822,601,000 | 38.136364 | 77 | 0.600658 | false |
awsdocs/aws-doc-sdk-examples | python/example_code/greengrass/snippets/getting_started_connectors.py | 1 | 1240 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to implement an AWS Lambda function that publishes messages to an
AWS IoT Greengrass connector.
"""
# snippet-start:[greengrass.python.getting-started-connectors.complete]
import json
import random
import greengrasssdk
iot_client = greengrasssdk.client('iot-data')
send_topic = 'twilio/txt'
def create_request(event):
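    # The incoming event is assumed (not shown in the original source) to
    # look like: {"to_name": "...", "to_number": "+1...", "temperature": 31}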
return {
"request": {
"recipient": {
"name": event['to_name'],
"phone_number": event['to_number'],
"message": f"temperature:{event['temperature']}"}},
"id": f"request_{random.randint(1, 101)}"}
# Publish to the Twilio Notifications connector through the twilio/txt topic.
def function_handler(event, context):
temperature = event['temperature']
# If temperature is greater than 30C, send a notification.
if temperature > 30:
message = create_request(event)
iot_client.publish(topic='twilio/txt', payload=json.dumps(message))
print(f'Published: {message}')
print(f'Temperature: {temperature}')
# snippet-end:[greengrass.python.getting-started-connectors.complete]
| apache-2.0 | 591,161,710,055,551,500 | 29.243902 | 77 | 0.679839 | false |
ShineFan/odoo | addons/google_calendar/google_calendar.py | 59 | 51310 | # -*- coding: utf-8 -*-
import operator
import simplejson
import urllib2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, exception_to_unicode
from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
def status_response(status, substr=False):
if substr:
return int(str(status)[0])
else:
return status_response(status, substr=True) == 2
class Meta(type):
""" This Meta class allow to define class as a structure, and so instancied variable
in __init__ to avoid to have side effect alike 'static' variable """
def __new__(typ, name, parents, attrs):
methods = dict((k, v) for k, v in attrs.iteritems()
if callable(v))
attrs = dict((k, v) for k, v in attrs.iteritems()
if not callable(v))
def init(self, **kw):
for k, v in attrs.iteritems():
setattr(self, k, v)
for k, v in kw.iteritems():
assert k in attrs
setattr(self, k, v)
methods['__init__'] = init
methods['__getitem__'] = getattr
return type.__new__(typ, name, parents, methods)
class Struct(object):
__metaclass__ = Meta
class OpenerpEvent(Struct):
event = False
found = False
event_id = False
isRecurrence = False
isInstance = False
update = False
status = False
attendee_id = False
synchro = False
class GmailEvent(Struct):
event = False
found = False
isRecurrence = False
isInstance = False
update = False
status = False
class SyncEvent(object):
def __init__(self):
self.OE = OpenerpEvent()
self.GG = GmailEvent()
self.OP = None
def __getitem__(self, key):
return getattr(self, key)
def compute_OP(self, modeFull=True):
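        # Decide which SyncOperation applies by comparing the event's state
        # on the OpenERP (OE) side and the Google (GG) side: presence,
        # active/deleted status and last update timestamps.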
        # If the event already exists on both the Gmail and OpenERP sides
if self.OE.found and self.GG.found:
is_owner = self.OE.event.env.user.id == self.OE.event.user_id.id
            # If the event has been deleted on one side, we delete it on the other side!
if self.OE.status != self.GG.status and is_owner:
self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
'The event has been deleted from one side, we delete on other side !')
#If event is not deleted !
elif self.OE.status and (self.GG.status or not is_owner):
if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
if self.OE.update < self.GG.update:
tmpSrc = 'GG'
elif self.OE.update > self.GG.update:
tmpSrc = 'OE'
assert tmpSrc in ['GG', 'OE']
#if self.OP.action == None:
if self[tmpSrc].isRecurrence:
if self[tmpSrc].status:
self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
else:
self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')
elif self[tmpSrc].isInstance:
self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
else:
self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
else:
if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
else:
self.OP = NothingToDo("", 'Not update needed')
else:
self.OP = NothingToDo("", "Both are already deleted")
        # New in OpenERP... created on Google by create_new_events in the synchronize flow
elif self.OE.found and not self.GG.found:
if self.OE.status:
self.OP = Delete('OE', 'Update or delete from GOOGLE')
else:
if not modeFull:
self.OP = Delete('GG', 'Deleted from Odoo, need to delete it from Gmail if already created')
else:
self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in Odoo")
elif self.GG.found and not self.OE.found:
tmpSrc = 'GG'
if not self.GG.status and not self.GG.isInstance:
                # nothing to do: the event was created and deleted before the synchronization
self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
else:
if self.GG.isInstance:
if self[tmpSrc].status:
self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
else:
self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
else:
self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')
def __str__(self):
return self.__repr__()
def __repr__(self):
myPrint = "\n\n---- A SYNC EVENT ---"
myPrint += "\n ID OE: %s " % (self.OE.event and self.OE.event.id)
myPrint += "\n ID GG: %s " % (self.GG.event and self.GG.event.get('id', False))
myPrint += "\n Name OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
myPrint += "\n Name GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
myPrint += "\n Found OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
myPrint += "\n Recurrence OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
myPrint += "\n Instance OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
myPrint += "\n Synchro OE: %10s " % (self.OE.synchro)
myPrint += "\n Update OE: %10s " % (self.OE.update)
myPrint += "\n Update GG: %10s " % (self.GG.update)
myPrint += "\n Status OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
if (self.OP is None):
myPrint += "\n Action %s" % "---!!!---NONE---!!!---"
else:
myPrint += "\n Action %s" % type(self.OP).__name__
myPrint += "\n Source %s" % (self.OP.src)
myPrint += "\n comment %s" % (self.OP.info)
return myPrint
class SyncOperation(object):
def __init__(self, src, info, **kw):
self.src = src
self.info = info
for k, v in kw.items():
setattr(self, k, v)
def __str__(self):
return 'in__STR__'
class Create(SyncOperation):
pass
class Update(SyncOperation):
pass
class Delete(SyncOperation):
pass
class NothingToDo(SyncOperation):
pass
class Exclude(SyncOperation):
pass
class google_calendar(osv.AbstractModel):
STR_SERVICE = 'calendar'
_name = 'google.%s' % STR_SERVICE
def generate_data(self, cr, uid, event, isCreating=False, context=None):
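        # Build the event body expected by the Google Calendar v3 API:
        # summary, start/end with timezone, attendees, reminders, location,
        # visibility and optional recurrence rule.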
if not context:
context = {}
if event.allday:
start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=1), context=context).isoformat('T').split('T')[0]
type = 'date'
vstype = 'dateTime'
else:
start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
type = 'dateTime'
vstype = 'date'
attendee_list = []
for attendee in event.attendee_ids:
email = tools.email_split(attendee.email)
email = email[0] if email else '[email protected]'
attendee_list.append({
'email': email,
'displayName': attendee.partner_id.name,
'responseStatus': attendee.state or 'needsAction',
})
reminders = []
for alarm in event.alarm_ids:
reminders.append({
"method": "email" if alarm.type == "email" else "popup",
"minutes": alarm.duration_minutes
})
data = {
"summary": event.name or '',
"description": event.description or '',
"start": {
type: start_date,
vstype: None,
'timeZone': context.get('tz', 'UTC'),
},
"end": {
type: final_date,
vstype: None,
'timeZone': context.get('tz', 'UTC'),
},
"attendees": attendee_list,
"reminders": {
"overrides": reminders,
"useDefault": "false"
},
"location": event.location or '',
"visibility": event['class'] or 'public',
}
if event.recurrency and event.rrule:
data["recurrence"] = ["RRULE:" + event.rrule]
if not event.active:
data["state"] = "cancelled"
if not self.get_need_synchro_attendee(cr, uid, context=context):
data.pop("attendees")
if isCreating:
other_google_ids = [other_att.google_internal_event_id for other_att in event.attendee_ids if other_att.google_internal_event_id]
if other_google_ids:
data["id"] = other_google_ids[0]
return data
def create_an_event(self, cr, uid, event, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event, isCreating=True, context=context)
url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)
def delete_an_event(self, cr, uid, event_id, context=None):
gs_pool = self.pool['google.service']
params = {
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)
return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)
def get_calendar_primary_id(self, cr, uid, context=None):
params = {
'fields': 'id',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/primary"
try:
st, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except Exception, e:
            if (e.code == 401): # Token invalid / Access unauthorized
error_msg = "Your token is invalid or has been revoked !"
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
raise
return (status_response(st), content['id'] or False, ask_time)
def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
if not token:
token = self.get_token(cr, uid, context)
params = {
'fields': 'items,nextPageToken',
'access_token': token,
'maxResults': 1000,
#'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
}
if lastSync:
params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
params['showDeleted'] = True
else:
params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events" % 'primary'
if nextPageToken:
params['pageToken'] = nextPageToken
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
google_events_dict = {}
for google_event in content['items']:
google_events_dict[google_event['id']] = google_event
if content.get('nextPageToken'):
google_events_dict.update(
self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
)
return google_events_dict
def get_one_event_synchro(self, cr, uid, google_id, context=None):
token = self.get_token(cr, uid, context)
params = {
'access_token': token,
'maxResults': 1000,
'showDeleted': True,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
try:
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except:
_logger.info("Calendar Synchro - In except of get_one_event_synchro")
pass
return status_response(status) and content or False
def update_to_google(self, cr, uid, oe_event, google_event, context):
calendar_event = self.pool['calendar.event']
url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = self.generate_data(cr, uid, oe_event, context=context)
data['sequence'] = google_event.get('sequence', 0)
data_json = simplejson.dumps(data)
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)
update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)
def update_an_event(self, cr, uid, event, context=None):
data = self.generate_data(cr, uid, event, context=context)
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
headers = {}
data['access_token'] = self.get_token(cr, uid, context)
status, response, ask_time = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
        # TO_CHECK: if the HTTP request fails (no event), should we DELETE?
return response
def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event_new, context=context)
data['recurringEventId'] = event_ori_google_id
data['originalStartTime'] = event_new.recurrent_id_date
url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json'}
data['sequence'] = self.get_sequence(cr, uid, instance_id, context)
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)
def update_from_google(self, cr, uid, event, single_event_dict, type, context):
if context is None:
context = []
calendar_event = self.pool['calendar.event']
res_partner_obj = self.pool['res.partner']
calendar_attendee_obj = self.pool['calendar.attendee']
calendar_alarm_obj = self.pool['calendar.alarm']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
attendee_record = []
alarm_record = set()
partner_record = [(4, myPartnerID)]
result = {}
if self.get_need_synchro_attendee(cr, uid, context=context):
for google_attendee in single_event_dict.get('attendees', []):
partner_email = google_attendee.get('email', False)
if type == "write":
for oe_attendee in event['attendee_ids']:
if oe_attendee.email == partner_email:
calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
google_attendee['found'] = True
continue
if google_attendee.get('found'):
continue
attendee_id = res_partner_obj.search(cr, uid, [('email', '=', partner_email)], context=context)
if not attendee_id:
data = {
'email': partner_email,
'customer': False,
'name': google_attendee.get("displayName", False) or partner_email
}
attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
partner_record.append((4, attendee.get('id')))
attendee['partner_id'] = attendee.pop('id')
attendee['state'] = google_attendee['responseStatus']
attendee_record.append((0, 0, attendee))
for google_alarm in single_event_dict.get('reminders', {}).get('overrides', []):
alarm_id = calendar_alarm_obj.search(
cr,
uid,
[
('type', '=', google_alarm['method'] if google_alarm['method'] == 'email' else 'notification'),
('duration_minutes', '=', google_alarm['minutes'])
],
context=context
)
if not alarm_id:
data = {
'type': google_alarm['method'] if google_alarm['method'] == 'email' else 'notification',
'duration': google_alarm['minutes'],
'interval': 'minutes',
'name': "%s minutes - %s" % (google_alarm['minutes'], google_alarm['method'])
}
alarm_id = [calendar_alarm_obj.create(cr, uid, data, context=context)]
alarm_record.add(alarm_id[0])
UTC = pytz.timezone('UTC')
if single_event_dict.get('start') and single_event_dict.get('end'): # If not cancelled
if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
date = parser.parse(single_event_dict['start']['dateTime'])
stop = parser.parse(single_event_dict['end']['dateTime'])
date = str(date.astimezone(UTC))[:-6]
stop = str(stop.astimezone(UTC))[:-6]
allday = False
else:
date = (single_event_dict['start']['date'])
stop = (single_event_dict['end']['date'])
d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
allday = True
d_end = d_end + timedelta(days=-1)
stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)
update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
result.update({
'start': date,
'stop': stop,
'allday': allday
})
result.update({
'attendee_ids': attendee_record,
'partner_ids': list(set(partner_record)),
'alarm_ids': [(6, 0, list(alarm_record))],
'name': single_event_dict.get('summary', 'Event'),
'description': single_event_dict.get('description', False),
'location': single_event_dict.get('location', False),
'class': single_event_dict.get('visibility', 'public'),
'oe_update_date': update_date,
})
if single_event_dict.get("recurrence", False):
rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
result['rrule'] = rrule
context = dict(context or {}, no_mail_to_attendees=True)
if type == "write":
res = calendar_event.write(cr, uid, event['id'], result, context=context)
elif type == "copy":
result['recurrence'] = True
res = calendar_event.write(cr, uid, [event['id']], result, context=context)
elif type == "create":
res = calendar_event.create(cr, uid, result, context=context)
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
return res
def remove_references(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
reset_data = {
'google_calendar_rtoken': False,
'google_calendar_token': False,
'google_calendar_token_validity': False,
'google_calendar_last_sync_date': False,
'google_calendar_cal_id': False,
}
all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
current_user.write(reset_data)
return True
def synchronize_events_cron(self, cr, uid, context=None):
ids = self.pool['res.users'].search(cr, uid, [('google_calendar_last_sync_date', '!=', False)], context=context)
_logger.info("Calendar Synchro - Started by cron")
for user_to_sync in ids:
_logger.info("Calendar Synchro - Starting synchronization for a new user [%s] " % user_to_sync)
try:
resp = self.synchronize_events(cr, user_to_sync, False, lastSync=True, context=None)
if resp.get("status") == "need_reset":
_logger.info("[%s] Calendar Synchro - Failed - NEED RESET !" % user_to_sync)
else:
_logger.info("[%s] Calendar Synchro - Done with status : %s !" % (user_to_sync, resp.get("status")))
except Exception, e:
_logger.info("[%s] Calendar Synchro - Exception : %s !" % (user_to_sync, exception_to_unicode(e)))
_logger.info("Calendar Synchro - Ended by cron")
def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
if context is None:
context = {}
# def isValidSync(syncToken):
# gs_pool = self.pool['google.service']
# params = {
# 'maxResults': 1,
# 'fields': 'id',
# 'access_token': self.get_token(cr, uid, context),
# 'syncToken': syncToken,
# }
# url = "/calendar/v3/calendars/primary/events"
# status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
# return int(status) != 410
user_to_sync = ids and ids[0] or uid
current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_to_sync, context=context)
st, current_google, ask_time = self.get_calendar_primary_id(cr, user_to_sync, context=context)
if current_user.google_calendar_cal_id:
if current_google != current_user.google_calendar_cal_id:
return {
"status": "need_reset",
"info": {
"old_name": current_user.google_calendar_cal_id,
"new_name": current_google
},
"url": ''
}
if lastSync and self.get_last_sync_date(cr, user_to_sync, context=context) and not self.get_disable_since_synchro(cr, user_to_sync, context=context):
lastSync = self.get_last_sync_date(cr, user_to_sync, context)
_logger.info("[%s] Calendar Synchro - MODE SINCE_MODIFIED : %s !" % (user_to_sync, lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT)))
else:
lastSync = False
_logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO FORCED" % user_to_sync)
else:
current_user.write({'google_calendar_cal_id': current_google})
lastSync = False
_logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID" % user_to_sync)
new_ids = []
new_ids += self.create_new_events(cr, user_to_sync, context=context)
new_ids += self.bind_recurring_events_to_google(cr, user_to_sync, context)
res = self.update_events(cr, user_to_sync, lastSync, context)
current_user.write({'google_calendar_last_sync_date': ask_time})
return {
"status": res and "need_refresh" or "no_new_event_from_google",
"url": ''
}
def create_new_events(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
('google_internal_event_id', '=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
other_google_ids = [other_att.google_internal_event_id for other_att in att.event_id.attendee_ids if other_att.google_internal_event_id and other_att.id != att.id]
for other_google_id in other_google_ids:
if self.get_one_event_synchro(cr, uid, other_google_id, context=context):
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': other_google_id})
break
else:
if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
st, response, ask_time = self.create_an_event(cr, uid, att.event_id, context=context)
if status_response(st):
update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
new_ids.append(response['id'])
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
return new_ids
def get_context_no_virtual(self, context):
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
context_norecurrent['active_test'] = False
return context_norecurrent
def bind_recurring_events_to_google(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = self.get_context_no_virtual(context)
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:
new_google_internal_event_id = False
source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)
source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)
if not source_attendee_record_id:
continue
source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]
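                # Descriptive note (not in the original source): Google identifies each
                # occurrence of a recurring event as "<base id>_<start>", where <start>
                # is rendered as YYYYMMDD for all-day events and YYYYMMDDTHHMMSSZ
                # otherwise; the two branches below rebuild that identifier from
                # recurrent_id_date.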
if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')
elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'
if new_google_internal_event_id:
                    # TODO WARNING: check that the event and all of its instances were not deleted in Gmail before doing this!
try:
st, response, ask_time = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)
if status_response(st):
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)
new_ids.append(new_google_internal_event_id)
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
except:
pass
return new_ids
def update_events(self, cr, uid, lastSync=False, context=None):
context = dict(context or {})
calendar_event = self.pool['calendar.event']
user_obj = self.pool['res.users']
att_obj = self.pool['calendar.attendee']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_novirtual = self.get_context_no_virtual(context)
if lastSync:
try:
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)
except urllib2.HTTPError, e:
if e.code == 410: # GONE, Google is lost.
                    # we need to force a rollback on this cursor, because it locks my res_users row but we need to write to it before raising.
cr.rollback()
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_last_sync_date': False}, context=context)
error_key = simplejson.loads(str(e))
error_key = error_key.get('error', {}).get('message', 'nc')
error_msg = "Google is lost... the next synchro will be a full synchro. \n\n %s" % error_key
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
my_google_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('google_internal_event_id', 'in', all_event_from_google.keys())
], context=context_novirtual)
my_openerp_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('google_internal_event_id', '!=', False),
], context=context_novirtual)
my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)
if self.get_print_log(cr, uid, context=context):
_logger.info("Calendar Synchro - \n\nUPDATE IN GOOGLE\n%s\n\nRETRIEVE FROM OE\n%s\n\nUPDATE IN OE\n%s\n\nRETRIEVE FROM GG\n%s\n\n" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))
for giid in my_openerp_googleinternal_ids:
active = True # if not sure, we request google
if giid.get('event_id'):
active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active
if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:
one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)
if one_event:
all_event_from_google[one_event['id']] = one_event
my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))
else:
domain = [
('partner_id', '=', myPartnerID),
('google_internal_event_id', '!=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
]
# Select all events from OpenERP which have been already synchronized in gmail
my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)
event_to_synchronize = {}
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
event = att.event_id
base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if att.google_internal_event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]
ev_to_sync.OE.attendee_id = att.id
ev_to_sync.OE.event = event
ev_to_sync.OE.found = True
ev_to_sync.OE.event_id = event.id
ev_to_sync.OE.isRecurrence = event.recurrency
ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)
ev_to_sync.OE.update = event.oe_update_date
ev_to_sync.OE.status = event.active
ev_to_sync.OE.synchro = att.oe_synchro_date
for event in all_event_from_google.values():
event_id = event.get('id')
base_event_id = event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][event_id]
ev_to_sync.GG.event = event
ev_to_sync.GG.found = True
ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))
ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))
ev_to_sync.GG.update = event.get('updated', None) # if deleted, no date without browse event
if ev_to_sync.GG.update:
ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')
ev_to_sync.GG.status = (event.get('status') != 'cancelled')
######################
# PRE-PROCESSING #
######################
for base_event in event_to_synchronize:
for current_event in event_to_synchronize[base_event]:
event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)
if self.get_print_log(cr, uid, context=context):
if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):
_logger.info(event_to_synchronize[base_event])
######################
# DO ACTION #
######################
for base_event in event_to_synchronize:
event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))
for current_event in event_to_synchronize[base_event]:
cr.commit()
event = current_event[1] # event is an Sync Event !
actToDo = event.OP
actSrc = event.OP.src
context['curr_attendee'] = event.OE.attendee_id
if isinstance(actToDo, NothingToDo):
continue
elif isinstance(actToDo, Create):
context_tmp = context.copy()
context_tmp['NewMeeting'] = True
if actSrc == 'GG':
res = self.update_from_google(cr, uid, False, event.GG.event, "create", context=context_tmp)
event.OE.event_id = res
meeting = calendar_event.browse(cr, uid, res, context=context)
attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)
self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)
elif actSrc == 'OE':
raise "Should be never here, creation for OE is done before update !"
#TODO Add to batch
elif isinstance(actToDo, Update):
if actSrc == 'GG':
self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', context)
elif actSrc == 'OE':
self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)
elif isinstance(actToDo, Exclude):
if actSrc == 'OE':
self.delete_an_event(cr, uid, current_event[0], context=context)
elif actSrc == 'GG':
new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]
if 'T' in new_google_event_id:
new_google_event_id = new_google_event_id.replace('T', '')[:-1]
else:
new_google_event_id = new_google_event_id + "000000"
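                        # Descriptive note (not in the original source): the suffix is
                        # normalised to the plain YYYYMMDDHHMMSS form used by OpenERP
                        # "virtual" ids ("<real id>-<timestamp>"), which is how the
                        # detached occurrence is created or unlinked just below.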
if event.GG.status:
parent_event = {}
if not event_to_synchronize[base_event][0][1].OE.event_id:
main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)
event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]
parent_event['id'] = "%s-%s" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)
res = self.update_from_google(cr, uid, parent_event, event.GG.event, "copy", context)
else:
parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id
if parent_oe_id:
calendar_event.unlink(cr, uid, "%s-%s" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)
elif isinstance(actToDo, Delete):
if actSrc == 'GG':
try:
self.delete_an_event(cr, uid, current_event[0], context=context)
except Exception, e:
error = simplejson.loads(e.read())
error_nr = error.get('error', {}).get('code')
# if already deleted from gmail or never created
if error_nr in (404, 410,):
pass
else:
raise e
elif actSrc == 'OE':
calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)
return True
def check_and_sync(self, cr, uid, oe_event, google_event, context):
if datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") > datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_to_google(cr, uid, oe_event, google_event, context)
elif datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") < datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_from_google(cr, uid, oe_event, google_event, 'write', context)
def get_sequence(self, cr, uid, instance_id, context=None):
gs_pool = self.pool['google.service']
params = {
'fields': 'sequence',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', instance_id)
st, content, ask_time = gs_pool._do_request(cr, uid, url, params, headers, type='GET', context=context)
return content.get('sequence', 0)
#################################
## MANAGE CONNEXION TO GMAIL ##
#################################
def get_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if not current_user.google_calendar_token_validity or \
datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):
self.do_refresh_token(cr, uid, context=context)
current_user.refresh()
return current_user.google_calendar_token
def get_last_sync_date(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_last_sync_date and datetime.strptime(current_user.google_calendar_last_sync_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes=0) or False
def do_refresh_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
gs_pool = self.pool['google.service']
all_token = gs_pool._refresh_google_token_json(cr, uid, current_user.google_calendar_rtoken, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def need_authorize(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_rtoken is False
def get_calendar_scope(self, RO=False):
readonly = RO and '.readonly' or ''
return 'https://www.googleapis.com/auth/calendar%s' % (readonly)
def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None):
url = self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=self.get_calendar_scope(), context=context)
return url
def can_authorize_google(self, cr, uid, context=None):
return self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
def set_all_tokens(self, cr, uid, authorization_code, context=None):
gs_pool = self.pool['google.service']
all_token = gs_pool._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_rtoken' % self.STR_SERVICE] = all_token.get('refresh_token')
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def get_minTime(self, cr, uid, context=None):
number_of_week = int(self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13))
return datetime.now() - timedelta(weeks=number_of_week)
def get_need_synchro_attendee(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True)
def get_disable_since_synchro(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False)
def get_print_log(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False)
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'google_calendar_rtoken': fields.char('Refresh Token'),
'google_calendar_token': fields.char('User token'),
'google_calendar_token_validity': fields.datetime('Token Validity'),
'google_calendar_last_sync_date': fields.datetime('Last synchro date'),
'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID who has been synchronized. If it is changed, we remove \
all links between GoogleID and Odoo Google Internal ID')
}
class calendar_event(osv.Model):
_inherit = "calendar.event"
def get_fields_need_update_google(self, cr, uid, context=None):
return ['name', 'description', 'allday', 'start', 'date_end', 'stop',
'attendee_ids', 'alarm_ids', 'location', 'class', 'active',
'start_date', 'start_datetime', 'stop_date', 'stop_datetime']
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
sync_fields = set(self.get_fields_need_update_google(cr, uid, context))
if (set(vals.keys()) & sync_fields) and 'oe_update_date' not in vals.keys() and 'NewMeeting' not in context:
vals['oe_update_date'] = datetime.now()
return super(calendar_event, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
if default.get('write_type', False):
del default['write_type']
elif default.get('recurrent_id', False):
default['oe_update_date'] = datetime.now()
else:
default['oe_update_date'] = False
return super(calendar_event, self).copy(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, can_be_deleted=False, context=None):
return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context)
_columns = {
'oe_update_date': fields.datetime('Odoo Update Date'),
}
class calendar_attendee(osv.Model):
_inherit = 'calendar.attendee'
_columns = {
'google_internal_event_id': fields.char('Google Calendar Event Id'),
'oe_synchro_date': fields.datetime('Odoo Synchro Date'),
}
_sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')]
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
for id in ids:
ref = vals.get('event_id', self.browse(cr, uid, id, context=context).event_id.id)
            # If attendees are updated, we need to specify that the next synchro needs an action
            # Except if it comes from an update_from_google
if not context.get('curr_attendee', False) and not context.get('NewMeeting', False):
self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context)
return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context)
| agpl-3.0 | 3,721,639,721,242,957,000 | 48.100478 | 244 | 0.565718 | false |
thelonious/g2x | overlay/DataManager.py | 2 | 1943 | import sqlite3
from DataRegion import DataRegion
SELECT_DEPTH = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','depth_feet')
"""
SELECT_TEMPERATURE = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','fahrenheit')
"""
class DataManager:
def __init__(self):
self.depth_regions = []
self.temperature_regions = []
def load(self, db_file):
self.connection = sqlite3.connect(db_file)
self.depth_regions = self.process_query(SELECT_DEPTH)
self.temperature_regions = self.process_query(SELECT_TEMPERATURE)
self.connection.close()
def process_query(self, query):
running = False
regions = []
currentRegion = None
for (date, property, value) in self.connection.execute(query):
if property == "running":
running = value == 1.0
if running:
currentRegion = DataRegion()
regions.append(currentRegion)
else:
if currentRegion is not None and len(currentRegion.data) == 0:
regions.pop()
currentRegion = None
elif running:
currentRegion.addTimeData(date, value)
else:
print("hmm, got value, but we're not supposedly running")
return regions
def select_depths(self, start_time, end_time):
result = []
for region in self.depth_regions:
result.extend(region.dataInTimeRegion(start_time, end_time))
return result
def select_temperatures(self, start_time, end_time):
result = []
for region in self.temperature_regions:
result.extend(region.dataInTimeRegion(start_time, end_time))
return result
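# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The file name
# and time bounds are made-up values; the schema assumed is the
# readings(date, device, property, value) table queried above.
#
#   manager = DataManager()
#   manager.load("g2x.sqlite")
#   depths = manager.select_depths(start_time, end_time)
#   temps = manager.select_temperatures(start_time, end_time)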
| mit | 6,062,674,781,791,214,000 | 25.256757 | 82 | 0.589295 | false |
mskovacic/Projekti | raspberrypi/isprobavanje/pyqt5/Showing_a_tooltip.py | 1 | 1033 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
ZetCode PyQt5 tutorial
This example shows a tooltip on
a window and a button.
author: Jan Bodnar
website: zetcode.com
last edited: January 2015
"""
import sys
from PyQt5.QtWidgets import (QWidget, QToolTip,
QPushButton, QApplication)
from PyQt5.QtGui import QFont
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
QToolTip.setFont(QFont('SansSerif', 10))
self.setToolTip('This is a <b>QWidget</b> widget')
btn = QPushButton('Button', self)
btn.setToolTip('This is a <b>QPushButton</b> widget')
btn.resize(btn.sizeHint())
btn.move(50, 50)
self.setGeometry(300, 300, 300, 200)
self.setWindowTitle('Tooltips')
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| mit | -3,473,603,819,737,326,600 | 19.66 | 61 | 0.555663 | false |
soldag/home-assistant | homeassistant/helpers/singleton.py | 8 | 1596 | """Helper to help coordinating calls."""
import asyncio
import functools
from typing import Callable, Optional, TypeVar, cast
from homeassistant.core import HomeAssistant
from homeassistant.loader import bind_hass
T = TypeVar("T")
FUNC = Callable[[HomeAssistant], T]
def singleton(data_key: str) -> Callable[[FUNC], FUNC]:
"""Decorate a function that should be called once per instance.
Result will be cached and simultaneous calls will be handled.
"""
def wrapper(func: FUNC) -> FUNC:
"""Wrap a function with caching logic."""
if not asyncio.iscoroutinefunction(func):
@bind_hass
@functools.wraps(func)
def wrapped(hass: HomeAssistant) -> T:
obj: Optional[T] = hass.data.get(data_key)
if obj is None:
obj = hass.data[data_key] = func(hass)
return obj
return wrapped
@bind_hass
@functools.wraps(func)
async def async_wrapped(hass: HomeAssistant) -> T:
obj_or_evt = hass.data.get(data_key)
if not obj_or_evt:
evt = hass.data[data_key] = asyncio.Event()
result = await func(hass)
hass.data[data_key] = result
evt.set()
return cast(T, result)
if isinstance(obj_or_evt, asyncio.Event):
evt = obj_or_evt
await evt.wait()
return cast(T, hass.data.get(data_key))
return cast(T, obj_or_evt)
return async_wrapped
return wrapper
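# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original helper): how a caller would
# typically apply the decorator. The data key and the dict payload are
# assumptions chosen for the example, not a real Home Assistant integration.
@singleton("example_singleton_store")
def _get_example_store(hass: HomeAssistant) -> dict:
    """Create the shared store on first call, then reuse it from hass.data."""
    return {}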
| apache-2.0 | 4,843,913,981,789,948,000 | 27 | 67 | 0.570175 | false |
ThiefMaster/werkzeug | tests/contrib/test_securecookie.py | 30 | 1328 | # -*- coding: utf-8 -*-
"""
tests.securecookie
~~~~~~~~~~~~~~~~~~
Tests the secure cookie.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.utils import parse_cookie
from werkzeug.wrappers import Request, Response
from werkzeug.contrib.securecookie import SecureCookie
def test_basic_support():
c = SecureCookie(secret_key=b'foo')
assert c.new
assert not c.modified
assert not c.should_save
c['x'] = 42
assert c.modified
assert c.should_save
s = c.serialize()
c2 = SecureCookie.unserialize(s, b'foo')
assert c is not c2
assert not c2.new
assert not c2.modified
assert not c2.should_save
assert c2 == c
c3 = SecureCookie.unserialize(s, b'wrong foo')
assert not c3.modified
assert not c3.new
assert c3 == {}
def test_wrapper_support():
req = Request.from_values()
resp = Response()
c = SecureCookie.load_cookie(req, secret_key=b'foo')
assert c.new
c['foo'] = 42
assert c.secret_key == b'foo'
c.save_cookie(resp)
req = Request.from_values(headers={
'Cookie': 'session="%s"' % parse_cookie(resp.headers['set-cookie'])['session']
})
c2 = SecureCookie.load_cookie(req, secret_key=b'foo')
assert not c2.new
assert c2 == c
| bsd-3-clause | -1,901,504,666,548,185,900 | 23.592593 | 87 | 0.634036 | false |
Designist/sympy | sympy/utilities/randtest.py | 54 | 5488 | """ Helpers for randomized testing """
from __future__ import print_function, division
from random import uniform
import random
from sympy.core.numbers import I
from sympy.simplify.simplify import nsimplify
from sympy.core.containers import Tuple
from sympy.core.numbers import comp
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, as_int
def random_complex_number(a=2, b=-1, c=3, d=1, rational=False):
"""
Return a random complex number.
To reduce chance of hitting branch cuts or anything, we guarantee
b <= Im z <= d, a <= Re z <= c
"""
A, B = uniform(a, c), uniform(b, d)
if not rational:
return A + I*B
return nsimplify(A, rational=True) + I*nsimplify(B, rational=True)
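# Example (illustrative, not part of the original docstring): with the default
# bounds the value lies in the rectangle 2 <= Re(z) <= 3, -1 <= Im(z) <= 1;
# passing rational=True, e.g. random_complex_number(rational=True), returns a
# value whose real and imaginary parts are Rationals instead of Floats.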
def verify_numerically(f, g, z=None, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that f and g agree when evaluated in the argument z.
If z is None, all symbols will be tested. This routine does not test
whether there are Floats present with precision higher than 15 digits
so if there are, your results may not be what you expect due to round-
off errors.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> from sympy.utilities.randtest import verify_numerically as tn
>>> tn(sin(x)**2 + cos(x)**2, 1, x)
True
"""
f, g, z = Tuple(f, g, z)
z = [z] if isinstance(z, Symbol) else (f.free_symbols | g.free_symbols)
reps = list(zip(z, [random_complex_number(a, b, c, d) for zi in z]))
z1 = f.subs(reps).n()
z2 = g.subs(reps).n()
return comp(z1, z2, tol)
def test_derivative_numerically(f, z, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that the symbolically computed derivative of f
with respect to z is correct.
This routine does not test whether there are Floats present with
precision higher than 15 digits so if there are, your results may
not be what you expect due to round-off errors.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x
>>> from sympy.utilities.randtest import test_derivative_numerically as td
>>> td(sin(x), x)
True
"""
from sympy.core.function import Derivative
z0 = random_complex_number(a, b, c, d)
f1 = f.diff(z).subs(z, z0)
f2 = Derivative(f, z).doit_numerically(z0)
return comp(f1.n(), f2.n(), tol)
def _randrange(seed=None):
"""Return a randrange generator. ``seed`` can be
o None - return randomly seeded generator
o int - return a generator seeded with the int
o list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.utilities.randtest import _randrange
>>> rr = _randrange()
>>> rr(1000) # doctest: +SKIP
999
>>> rr = _randrange(3)
>>> rr(1000) # doctest: +SKIP
238
>>> rr = _randrange([0, 5, 1, 3, 4])
>>> rr(3), rr(3)
(0, 1)
"""
if seed is None:
return random.randrange
elif isinstance(seed, int):
return random.Random(seed).randrange
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b=None, seq=seed):
if b is None:
a, b = 0, a
a, b = as_int(a), as_int(b)
w = b - a
if w < 1:
raise ValueError('_randrange got empty range')
try:
x = seq.pop()
except AttributeError:
raise ValueError('_randrange expects a list-like sequence')
except IndexError:
raise ValueError('_randrange sequence was too short')
if a <= x < b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randrange got an unexpected seed')
def _randint(seed=None):
"""Return a randint generator. ``seed`` can be
o None - return randomly seeded generator
o int - return a generator seeded with the int
o list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.utilities.randtest import _randint
>>> ri = _randint()
>>> ri(1, 1000) # doctest: +SKIP
999
>>> ri = _randint(3)
>>> ri(1, 1000) # doctest: +SKIP
238
>>> ri = _randint([0, 5, 1, 2, 4])
>>> ri(1, 3), ri(1, 3)
(1, 2)
"""
if seed is None:
return random.randint
elif isinstance(seed, int):
return random.Random(seed).randint
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b, seq=seed):
a, b = as_int(a), as_int(b)
w = b - a
if w < 0:
raise ValueError('_randint got empty range')
try:
x = seq.pop()
except AttributeError:
raise ValueError('_randint expects a list-like sequence')
except IndexError:
raise ValueError('_randint sequence was too short')
if a <= x <= b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randint got an unexpected seed')
| bsd-3-clause | -5,153,507,352,518,488,000 | 29.831461 | 78 | 0.579628 | false |
rosswhitfield/mantid | scripts/SANS/sans/state/StateObjects/StateWavelength.py | 3 | 3361 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
""" Defines the state of the event slices which should be reduced."""
import json
import copy
from sans.state.JsonSerializable import JsonSerializable
from sans.common.enums import (RebinType, RangeStepType, SANSFacility)
from sans.state.StateObjects.wavelength_interval import WavelengthInterval
from sans.state.automatic_setters import automatic_setters
from sans.state.state_functions import one_is_none, validation_message
class StateWavelength(metaclass=JsonSerializable):
def __init__(self):
super(StateWavelength, self).__init__()
self.rebin_type = RebinType.REBIN
self.wavelength_interval: WavelengthInterval = WavelengthInterval()
self.wavelength_step_type = RangeStepType.NOT_SET
@property
def wavelength_step_type_lin_log(self):
# Return the wavelength step type, converting RANGE_LIN/RANGE_LOG to
# LIN/LOG. This is not ideal but is required for workflow algorithms
# which only accept a subset of the values in the enum
value = self.wavelength_step_type
result = RangeStepType.LIN if value in [RangeStepType.LIN, RangeStepType.RANGE_LIN] else \
RangeStepType.LOG if value in [RangeStepType.LOG, RangeStepType.RANGE_LOG] else \
RangeStepType.NOT_SET
return result
def validate(self):
is_invalid = dict()
if one_is_none([self.wavelength_interval]):
entry = validation_message("A wavelength entry has not been set.",
"Make sure that all entries for the wavelength are set.",
{"wavelength_binning": self.wavelength_interval})
is_invalid.update(entry)
if is_invalid:
raise ValueError("StateWavelength: The provided inputs are illegal. "
"Please see: {0}".format(json.dumps(is_invalid, indent=4)))
# ----------------------------------------------------------------------------------------------------------------------
# Builder
# ----------------------------------------------------------------------------------------------------------------------
class StateWavelengthBuilder(object):
@automatic_setters(StateWavelength)
def __init__(self):
super(StateWavelengthBuilder, self).__init__()
self.state = StateWavelength()
def build(self):
# Make sure that the product is in a valid state, ie not incomplete
self.state.validate()
return copy.copy(self.state)
def set_wavelength_step_type(self, val):
self.state.wavelength_step_type = val
def set_rebin_type(self, val):
self.state.rebin_type = val
def get_wavelength_builder(data_info):
facility = data_info.facility
if facility is SANSFacility.ISIS:
return StateWavelengthBuilder()
else:
raise NotImplementedError("StateWavelengthBuilder: Could not find any valid wavelength builder for the "
"specified StateData object {0}".format(str(data_info)))
| gpl-3.0 | 6,934,669,579,327,263,000 | 42.649351 | 120 | 0.627789 | false |
wcy940418/CRNN-end-to-end | src/test.py | 1 | 1976 | from __future__ import print_function
from model import CRNN, CtcCriterion
from dataset import DatasetLmdb
import os
import tensorflow as tf
import numpy as np
class Conf:
def __init__(self):
self.nClasses = 36
self.trainBatchSize = 100
self.testBatchSize = 200
self.maxIteration = 1000
self.displayInterval = 200
self.testInteval = 100
self.modelParFile = './crnn.model'
self.dataSet = '../data'
self.maxLength = 24
def labelInt2Char(n):
if n >= 0 and n <=9:
c = chr(n + 48)
elif n >= 10 and n<= 35:
c = chr(n + 97 - 10)
elif n == 36:
c = ''
return c
def convertSparseArrayToStrs(p):
print(p[0].shape, p[1].shape, p[2].shape)
print(p[2][0], p[2][1])
results = []
labels = []
for i in range(p[2][0]):
results.append([36 for x in range(p[2][1])])
for i in range(p[0].shape[0]):
x, y = p[0][i]
results[x][y] = p[1][i]
for i in range(len(results)):
label = ''
for j in range(len(results[i])):
label += labelInt2Char(results[i][j])
labels.append(label)
return labels
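# Descriptive note (not in the original source): convertSparseArrayToStrs expects
# a SparseTensorValue-style triple p = (indices, values, dense_shape), as returned
# by a sparse CTC decode, with dense_shape = [batch_size, max_label_length];
# label 36 acts as the blank class and is rendered as '' by labelInt2Char above.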
if __name__ == '__main__':
gConfig = Conf()
sess = tf.InteractiveSession()
weights = None
if os.path.isfile(gConfig.modelParFile+'.index'):
weights = gConfig.modelParFile
imgs = tf.placeholder(tf.float32, [None, 32, 100])
labels = tf.sparse_placeholder(tf.int32)
batches = tf.placeholder(tf.int32, [None])
isTraining = tf.placeholder(tf.bool)
crnn = CRNN(imgs, gConfig, isTraining, weights, sess)
ctc = CtcCriterion(crnn.prob, labels, batches)
data = DatasetLmdb(gConfig.dataSet)
testSeqLength = [gConfig.maxLength for i in range(10)]
batchSet, labelSet = data.nextBatch(10)
p = sess.run(ctc.decoded, feed_dict={
crnn.inputImgs:batchSet,
crnn.isTraining:False,
ctc.target:labelSet,
ctc.nSamples:testSeqLength
})
original = convertSparseArrayToStrs(labelSet)
predicted = convertSparseArrayToStrs(p[0])
for i in range(len(original)):
print("original: %s, predicted: %s" % (original[i], predicted[i])) | mit | 3,972,835,992,078,863,400 | 25.36 | 68 | 0.67004 | false |
benthomasson/ansible | lib/ansible/modules/identity/ipa/ipa_host.py | 7 | 9802 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_host
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA host
description:
- Add, modify and delete an IPA host using IPA API
options:
fqdn:
description:
- Full qualified domain name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
description:
description:
- A description of this host.
required: false
force:
description:
- Force host name even if not in DNS.
required: false
ip_address:
description:
- Add the host to DNS with this IP address.
required: false
mac_address:
description:
- List of Hardware MAC address(es) off this host.
- If option is omitted MAC addresses will not be checked or changed.
- If an empty list is passed all assigned MAC addresses will be removed.
- MAC addresses that are already assigned but not passed will be removed.
required: false
aliases: ["macaddress"]
ns_host_location:
description:
- Host location (e.g. "Lab 2")
required: false
aliases: ["nshostlocation"]
ns_hardware_platform:
description:
- Host hardware platform (e.g. "Lenovo T61")
required: false
aliases: ["nshardwareplatform"]
ns_os_version:
description:
- Host operating system and version (e.g. "Fedora 9")
required: false
aliases: ["nsosversion"]
user_certificate:
description:
- List of Base-64 encoded server certificates.
- If option is omitted certificates will not be checked or changed.
- If an empty list is passed all assigned certificates will be removed.
- Certificates already assigned but not passed will be removed.
required: false
aliases: ["usercertificate"]
state:
description: State to ensure
required: false
default: present
choices: ["present", "absent", "disabled"]
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: ipa.example.com
ipa_user:
description: Administrative account used on IPA server
required: false
default: admin
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: https
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure host is present
- ipa_host:
name: host01.example.com
description: Example host
ip_address: 192.168.0.123
ns_host_location: Lab
ns_os_version: CentOS 7
ns_hardware_platform: Lenovo T61
mac_address:
- "08:00:27:E3:B1:2D"
- "52:54:00:BD:97:1E"
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure host is disabled
- ipa_host:
name: host01.example.com
state: disabled
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure that all user certificates are removed
- ipa_host:
name: host01.example.com
user_certificate: []
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure host is absent
- ipa_host:
name: host01.example.com
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
host:
description: Host as returned by IPA API.
returned: always
type: dict
host_diff:
description: List of options that differ and would be changed
returned: if check mode and a difference is found
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient
from ansible.module_utils._text import to_native
class HostIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(HostIPAClient, self).__init__(module, host, port, protocol)
def host_find(self, name):
return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
def host_add(self, name, host):
return self._post_json(method='host_add', name=name, item=host)
def host_mod(self, name, host):
return self._post_json(method='host_mod', name=name, item=host)
def host_del(self, name):
return self._post_json(method='host_del', name=name)
def host_disable(self, name):
return self._post_json(method='host_disable', name=name)
def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
ns_os_version=None, user_certificate=None, mac_address=None):
data = {}
if description is not None:
data['description'] = description
if force is not None:
data['force'] = force
if ip_address is not None:
data['ip_address'] = ip_address
if ns_host_location is not None:
data['nshostlocation'] = ns_host_location
if ns_hardware_platform is not None:
data['nshardwareplatform'] = ns_hardware_platform
if ns_os_version is not None:
data['nsosversion'] = ns_os_version
if user_certificate is not None:
data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
if mac_address is not None:
data['macaddress'] = mac_address
return data
def get_host_diff(client, ipa_host, module_host):
non_updateable_keys = ['force', 'ip_address']
for key in non_updateable_keys:
if key in module_host:
del module_host[key]
return client.get_diff(ipa_data=ipa_host, module_data=module_host)
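# Descriptive note (not in the original module): 'force' and 'ip_address' are
# only meaningful when the host entry is first created, so they are dropped
# before diffing and never reported as a change for an existing host.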
def ensure(module, client):
name = module.params['name']
state = module.params['state']
ipa_host = client.host_find(name=name)
module_host = get_host_dict(description=module.params['description'],
force=module.params['force'], ip_address=module.params['ip_address'],
ns_host_location=module.params['ns_host_location'],
ns_hardware_platform=module.params['ns_hardware_platform'],
ns_os_version=module.params['ns_os_version'],
user_certificate=module.params['user_certificate'],
mac_address=module.params['mac_address'])
changed = False
if state in ['present', 'enabled', 'disabled']:
if not ipa_host:
changed = True
if not module.check_mode:
client.host_add(name=name, host=module_host)
else:
diff = get_host_diff(client, ipa_host, module_host)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_host.get(key)
client.host_mod(name=name, host=data)
else:
if ipa_host:
changed = True
if not module.check_mode:
client.host_del(name=name)
return changed, client.host_find(name=name)
def main():
module = AnsibleModule(
argument_spec=dict(
description=dict(type='str', required=False),
fqdn=dict(type='str', required=True, aliases=['name']),
force=dict(type='bool', required=False),
ip_address=dict(type='str', required=False),
ns_host_location=dict(type='str', required=False, aliases=['nshostlocation']),
ns_hardware_platform=dict(type='str', required=False, aliases=['nshardwareplatform']),
ns_os_version=dict(type='str', required=False, aliases=['nsosversion']),
user_certificate=dict(type='list', required=False, aliases=['usercertificate']),
mac_address=dict(type='list', required=False, aliases=['macaddress']),
state=dict(type='str', required=False, default='present',
choices=['present', 'absent', 'enabled', 'disabled']),
ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
ipa_host=dict(type='str', required=False, default='ipa.example.com'),
ipa_port=dict(type='int', required=False, default=443),
ipa_user=dict(type='str', required=False, default='admin'),
ipa_pass=dict(type='str', required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
),
supports_check_mode=True,
)
client = HostIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, host = ensure(module, client)
module.exit_json(changed=changed, host=host)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | 8,054,805,247,081,936,000 | 32.568493 | 114 | 0.627015 | false |
unigent/OpenWrt-Firefly-SDK | staging_dir/host/lib/scons-2.3.1/SCons/Platform/os2.py | 8 | 2241 | """SCons.Platform.os2
Platform-specific initialization for OS/2 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/os2.py 2014/03/02 14:18:15 garyo"
import win32
def generate(env):
if 'ENV' not in env:
env['ENV'] = {}
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.obj'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = '.exe'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
env['SHLIBPREFIX'] = ''
env['SHLIBSUFFIX'] = '.dll'
env['LIBPREFIXES'] = '$LIBPREFIX'
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['HOST_OS'] = 'os2'
env['HOST_ARCH'] = win32.get_architecture().arch
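# Illustrative note (not part of the original module): this module is normally
# selected indirectly -- e.g. Environment(platform='os2') in an SConstruct ends
# up calling this generate(env) to seed the construction variables above.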
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | -3,345,673,190,145,830,000 | 37.637931 | 119 | 0.687193 | false |
pabloborrego93/edx-platform | common/test/acceptance/tests/discussion/test_cohorts.py | 9 | 5802 | """
Tests related to the cohorting feature.
"""
from uuid import uuid4
from common.test.acceptance.tests.discussion.helpers import BaseDiscussionMixin, BaseDiscussionTestCase
from common.test.acceptance.tests.discussion.helpers import CohortTestMixin
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.fixtures.course import (CourseFixture, XBlockFixtureDesc)
from common.test.acceptance.pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionThreadPage,
InlineDiscussionPage)
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from nose.plugins.attrib import attr
class NonCohortedDiscussionTestMixin(BaseDiscussionMixin):
"""
Mixin for tests of discussion in non-cohorted courses.
"""
def setup_cohorts(self):
"""
No cohorts are desired for this mixin.
"""
pass
def test_non_cohort_visibility_label(self):
self.setup_thread(1)
self.assertEquals(self.thread_page.get_group_visibility_label(), "This post is visible to everyone.")
class CohortedDiscussionTestMixin(BaseDiscussionMixin, CohortTestMixin):
"""
Mixin for tests of discussion in cohorted courses.
"""
def setup_cohorts(self):
"""
Sets up the course to use cohorting with a single defined cohort.
"""
self.setup_cohort_config(self.course_fixture)
self.cohort_1_name = "Cohort 1"
self.cohort_1_id = self.add_manual_cohort(self.course_fixture, self.cohort_1_name)
def test_cohort_visibility_label(self):
# Must be moderator to view content in a cohort other than your own
AutoAuthPage(self.browser, course_id=self.course_id, roles="Moderator").visit()
self.thread_id = self.setup_thread(1, group_id=self.cohort_1_id)
self.assertEquals(
self.thread_page.get_group_visibility_label(),
"This post is visible only to {}.".format(self.cohort_1_name)
)
# Disable cohorts and verify that the post now shows as visible to everyone.
self.disable_cohorting(self.course_fixture)
self.refresh_thread_page(self.thread_id)
self.assertEquals(self.thread_page.get_group_visibility_label(), "This post is visible to everyone.")
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase):
"""
Tests for the discussion page displaying a single thread.
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
self.setup_cohorts()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def setup_thread_page(self, thread_id):
self.thread_page = DiscussionTabSingleThreadPage(self.browser, self.course_id, self.discussion_id, thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.visit()
# pylint: disable=unused-argument
def refresh_thread_page(self, thread_id):
self.browser.refresh()
self.thread_page.wait_for_page()
@attr(shard=5)
class CohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, CohortedDiscussionTestMixin):
"""
Tests for the discussion page displaying a single cohorted thread.
"""
# Actual test method(s) defined in CohortedDiscussionTestMixin.
pass
@attr(shard=5)
class NonCohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, NonCohortedDiscussionTestMixin):
"""
Tests for the discussion page displaying a single non-cohorted thread.
"""
# Actual test method(s) defined in NonCohortedDiscussionTestMixin.
pass
class InlineDiscussionTest(UniqueCourseTest):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fixture = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
)
)
)
)
).install()
self.setup_cohorts()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
def setup_thread_page(self, thread_id):
CoursewarePage(self.browser, self.course_id).visit()
self.show_thread(thread_id)
def show_thread(self, thread_id):
discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
discussion_page.expand_discussion()
self.assertEqual(discussion_page.get_num_displayed_threads(), 1)
discussion_page.show_thread(thread_id)
self.thread_page = discussion_page.thread_page # pylint: disable=attribute-defined-outside-init
def refresh_thread_page(self, thread_id):
self.browser.refresh()
self.show_thread(thread_id)
@attr(shard=5)
class CohortedInlineDiscussionTest(InlineDiscussionTest, CohortedDiscussionTestMixin):
"""
Tests for cohorted inline discussions.
"""
# Actual test method(s) defined in CohortedDiscussionTestMixin.
pass
@attr(shard=5)
class NonCohortedInlineDiscussionTest(InlineDiscussionTest, NonCohortedDiscussionTestMixin):
"""
Tests for non-cohorted inline discussions.
"""
# Actual test method(s) defined in NonCohortedDiscussionTestMixin.
pass
| agpl-3.0 | 6,886,545,071,831,901,000 | 36.432258 | 167 | 0.689935 | false |
bchan1234/inf1340_2015_asst1 | test_exercise3.py | 1 | 1682 | #!/usr/bin/env python
""" Module to test exercise3.py """
__author__ = 'Susan Sim'
__email__ = "[email protected]"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
import pytest
import mock
from exercise3 import diagnose_car
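# Descriptive note (not in the original file): each mock.patch below swaps in a
# fake raw_input whose side_effect list supplies the successive yes/no answers,
# and capsys captures what diagnose_car() prints so each branch's final
# recommendation can be asserted.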
def test_accepted_inputs(capsys):
with mock.patch("__builtin__.raw_input", side_effect=["Y", "Y"]):
diagnose_car()
out, err = capsys.readouterr()
assert out == "Clean terminals and try starting again.\n"
with mock.patch("__builtin__.raw_input", side_effect=["Y", "N"]):
diagnose_car()
out, err = capsys.readouterr()
assert out == "Replace cables and try again.\n"
with mock.patch("__builtin__.raw_input", side_effect=["N", "Y"]):
diagnose_car()
out, err = capsys.readouterr()
assert out == "Replace the battery.\n"
with mock.patch("__builtin__.raw_input", side_effect=["N", "N", "Y"]):
diagnose_car()
out, err = capsys.readouterr()
assert out == "Check spark plug connections.\n"
with mock.patch("__builtin__.raw_input", side_effect=["N", "N", "N", "N"]):
diagnose_car()
out, err = capsys.readouterr()
assert out == "Engine is not getting enough fuel. Clean fuel pump.\n"
with mock.patch("__builtin__.raw_input", side_effect=["N", "N", "N", "Y", "N"]):
diagnose_car()
out, err = capsys.readouterr()
assert out == "Check to ensure the choke is opening and closing.\n"
with mock.patch("__builtin__.raw_input", side_effect=["N", "N", "N", "Y", "Y"]):
diagnose_car()
out, err = capsys.readouterr()
assert out == "Get it in for service.\n"
| mit | 8,326,796,137,564,447,000 | 31.980392 | 84 | 0.580856 | false |
Leila20/django | tests/admin_views/urls.py | 55 | 1024 | from django.conf.urls import include, url
from . import admin, custom_has_permission_admin, customadmin, views
urlpatterns = [
url(r'^test_admin/admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^test_admin/admin/secure-view/$', views.secure_view, name='secure_view'),
url(r'^test_admin/admin/secure-view2/$', views.secure_view2, name='secure_view2'),
url(r'^test_admin/admin/', admin.site.urls),
url(r'^test_admin/admin2/', customadmin.site.urls),
url(r'^test_admin/admin3/', (admin.site.get_urls(), 'admin', 'admin3'), dict(form_url='pony')),
url(r'^test_admin/admin4/', customadmin.simple_site.urls),
url(r'^test_admin/admin5/', admin.site2.urls),
url(r'^test_admin/admin7/', admin.site7.urls),
# All admin views accept `extra_context` to allow adding it like this:
url(r'^test_admin/admin8/', (admin.site.get_urls(), 'admin', 'admin-extra-context'), {'extra_context': {}}),
url(r'^test_admin/has_permission_admin/', custom_has_permission_admin.site.urls),
]
| bsd-3-clause | 5,966,273,398,820,268,000 | 55.888889 | 112 | 0.680664 | false |
thomashaw/SecGen | modules/utilities/unix/ctf/metactf/files/repository/src_angr/dist/scaffold11.py | 6 | 1886 | # This time, the solution involves simply replacing scanf with our own version,
# since Angr does not support requesting multiple parameters with scanf.
import angr
import claripy
import sys
def main(argv):
path_to_binary = argv[1]
project = angr.Project(path_to_binary)
initial_state = project.factory.entry_state()
class ReplacementScanf(angr.SimProcedure):
# Finish the parameters to the scanf function. Hint: 'scanf("%u %u", ...)'.
# (!)
def run(self, format_string, scanf0_address, ...):
scanf0 = claripy.BVS('scanf0', ???)
...
# The scanf function writes user input to the buffers to which the
# parameters point.
self.state.memory.store(scanf0_address, scanf0, endness=project.arch.memory_endness)
...
# Now, we want to 'set aside' references to our symbolic values in the
# globals plugin included by default with a state. You will need to
# store multiple bitvectors. You can either use a list, tuple, or multiple
# keys to reference the different bitvectors.
# (!)
self.state.globals['solution0'] = ???
self.state.globals['solution1'] = ???
scanf_symbol = ???
project.hook_symbol(scanf_symbol, ReplacementScanf())
simulation = project.factory.simgr(initial_state)
def is_successful(state):
stdout_output = state.posix.dumps(sys.stdout.fileno())
return ???
def should_abort(state):
stdout_output = state.posix.dumps(sys.stdout.fileno())
return ???
simulation.explore(find=is_successful, avoid=should_abort)
if simulation.found:
solution_state = simulation.found[0]
# Grab whatever you set aside in the globals dict.
stored_solutions0 = solution_state.globals['solution0']
...
solution = ???
print solution
else:
raise Exception('Could not find the solution')
if __name__ == '__main__':
main(sys.argv)
| gpl-3.0 | 1,531,213,973,931,023,000 | 29.419355 | 90 | 0.675504 | false |
ledatelescope/bifrost | test/benchmarks/bifrost_benchmarks/pipeline_benchmarker.py | 1 | 2851 | """ This file defines the PipelineBenchmarker class
This class is used for timing the execution of pipelines and
individual methods of blocks inside those pipelines.
"""
import numpy as np
from timeit import default_timer as timer
class PipelineBenchmarker(object):
""" Time total clock and individual parts of blocks in a pipeline """
def __init__(self):
""" Set two clock variables for recording """
self.total_clock_time = 0
self.relevant_clock_time = 0
def timeit(self, method):
""" Decorator for timing execution of a method
Returns:
function: the original function, wrapped
with a time accumulator
"""
def timed(*args, **kw):
ts = timer()
result = method(*args, **kw)
te = timer()
self.relevant_clock_time += te-ts
return result
return timed
def reset_times(self):
""" Set the two class clocks to 0 """
self.total_clock_time = 0
self.relevant_clock_time = 0
def run_benchmark(self):
""" Run the benchmark once
This file should contain the wrapping of a method
with self.timeit(), e.g., block.on_data = self.timeit(block.on_data),
which will set the relevant_clock_time to start tracking
that particular method. Note that you can do this with multiple
methods, and it will accumulate times for all of them.
"""
raise NotImplementedError(
"You need to redefine the run_benchmark class!")
def average_benchmark(self, number_runs):
""" Average over a number of tests for more accurate times
Args:
number_runs (int): Number of times to run the benchmark,
excluding the first time (which runs
without recording, as it is always
slower)
Returns:
(clock time, relevant time) - A tuple of the total average
clock time of the pipeline
and the times which were
chosen to be measured inside
run_benchmark
"""
""" First test is always longer """
self.run_benchmark()
self.reset_times()
total_clock_times = np.zeros(number_runs)
relevant_clock_times = np.zeros(number_runs)
for i in range(number_runs):
self.run_benchmark()
total_clock_times[i] = self.total_clock_time
relevant_clock_times[i] = self.relevant_clock_time
self.reset_times()
return np.average(total_clock_times), np.average(relevant_clock_times)
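# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a tiny subclass showing
# the intended pattern -- wrap the method you care about with self.timeit()
# inside run_benchmark() and record the wall-clock time of the whole run in
# total_clock_time. A real benchmark would wrap a bifrost block method such as
# block.on_data and run an actual pipeline; the sleep below is just a stand-in
# workload.
class _ExampleSleepBenchmarker(PipelineBenchmarker):
    """ Example only: times a dummy workload instead of a bifrost pipeline """
    def run_benchmark(self):
        import time
        dummy_work = self.timeit(lambda: time.sleep(0.01))
        start = timer()
        dummy_work()
        self.total_clock_time = timer() - start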
| bsd-3-clause | -6,278,130,528,610,996,000 | 33.768293 | 78 | 0.559102 | false |
paolodedios/tensorflow | tensorflow/python/ops/standard_ops.py | 6 | 5914 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform as _platform
import sys as _sys
from tensorflow.python import autograph
from tensorflow.python.training.experimental import loss_scaling_gradient_tape
# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.eager import wrap_function
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.batch_ops import *
from tensorflow.python.ops.critical_section_ops import *
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.proto_ops import *
from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch
from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sort_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.stateless_random_ops import *
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.variables import *
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map
# pylint: disable=g-import-not-at-top
if _platform.system() == "Windows":
from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt
else:
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=g-import-not-at-top
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
# These modules were imported to set up RaggedTensor operators and dispatchers:
del _ragged_dispatch, _ragged_operators
| apache-2.0 | -2,332,727,853,931,318,000 | 46.312 | 93 | 0.800473 | false |
chergert/gnome-builder | src/plugins/meson-templates/resources/src/gi_composites.py | 2 | 9161 | #
# Copyright © 2015 Dustin Spicuzza <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
from os.path import abspath, join
import inspect
import warnings
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
__all__ = ['GtkTemplate']
class GtkTemplateWarning(UserWarning):
pass
def _connect_func(builder, obj, signal_name, handler_name,
connect_object, flags, cls):
'''Handles GtkBuilder signal connect events'''
if connect_object is None:
extra = ()
else:
extra = (connect_object,)
# The handler name refers to an attribute on the template instance,
# so ask GtkBuilder for the template instance
template_inst = builder.get_object(cls.__gtype_name__)
if template_inst is None: # This should never happen
errmsg = "Internal error: cannot find template instance! obj: %s; " \
"signal: %s; handler: %s; connect_obj: %s; class: %s" % \
(obj, signal_name, handler_name, connect_object, cls)
warnings.warn(errmsg, GtkTemplateWarning)
return
handler = getattr(template_inst, handler_name)
if flags == GObject.ConnectFlags.AFTER:
obj.connect_after(signal_name, handler, *extra)
else:
obj.connect(signal_name, handler, *extra)
template_inst.__connected_template_signals__.add(handler_name)
def _register_template(cls, template_bytes):
'''Registers the template for the widget and hooks init_template'''
# This implementation won't work if there are nested templates, but
# we can't do that anyways due to PyGObject limitations so it's ok
if not hasattr(cls, 'set_template'):
raise TypeError("Requires PyGObject 3.13.2 or greater")
cls.set_template(template_bytes)
bound_methods = set()
bound_widgets = set()
# Walk the class, find marked callbacks and child attributes
for name in dir(cls):
o = getattr(cls, name, None)
if inspect.ismethod(o):
if hasattr(o, '_gtk_callback'):
bound_methods.add(name)
# Don't need to call this, as connect_func always gets called
#cls.bind_template_callback_full(name, o)
elif isinstance(o, _Child):
cls.bind_template_child_full(name, True, 0)
bound_widgets.add(name)
# Have to setup a special connect function to connect at template init
# because the methods are not bound yet
cls.set_connect_func(_connect_func, cls)
cls.__gtemplate_methods__ = bound_methods
cls.__gtemplate_widgets__ = bound_widgets
base_init_template = cls.init_template
cls.init_template = lambda s: _init_template(s, cls, base_init_template)
def _init_template(self, cls, base_init_template):
'''This would be better as an override for Gtk.Widget'''
# TODO: could disallow using a metaclass.. but this is good enough
# .. if you disagree, feel free to fix it and issue a PR :)
if self.__class__ is not cls:
raise TypeError("Inheritance from classes with @GtkTemplate decorators "
"is not allowed at this time")
connected_signals = set()
self.__connected_template_signals__ = connected_signals
base_init_template(self)
for name in self.__gtemplate_widgets__:
widget = self.get_template_child(cls, name)
self.__dict__[name] = widget
if widget is None:
# Bug: if you bind a template child, and one of them was
# not present, then the whole template is broken (and
# it's not currently possible for us to know which
# one is broken either -- but the stderr should show
# something useful with a Gtk-CRITICAL message)
raise AttributeError("A missing child widget was set using "
"GtkTemplate.Child and the entire "
"template is now broken (widgets: %s)" %
', '.join(self.__gtemplate_widgets__))
for name in self.__gtemplate_methods__.difference(connected_signals):
errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " +
"but was not present in template") % name
warnings.warn(errmsg, GtkTemplateWarning)
# TODO: Make it easier for IDE to introspect this
class _Child(object):
'''
Assign this to an attribute in your class definition and it will
be replaced with a widget defined in the UI file when init_template
is called
'''
__slots__ = []
@staticmethod
def widgets(count):
'''
Allows declaring multiple widgets with less typing::
button \
label1 \
label2 = GtkTemplate.Child.widgets(3)
'''
return [_Child() for _ in range(count)]
class _GtkTemplate(object):
'''
Use this class decorator to signify that a class is a composite
widget which will receive widgets and connect to signals as
defined in a UI template. You must call init_template to
cause the widgets/signals to be initialized from the template::
@GtkTemplate(ui='foo.ui')
class Foo(Gtk.Box):
def __init__(self):
super(Foo, self).__init__()
self.init_template()
The 'ui' parameter can either be a file path or a GResource resource
path::
@GtkTemplate(ui='/org/example/foo.ui')
class Foo(Gtk.Box):
pass
To connect a signal to a method on your instance, do::
@GtkTemplate.Callback
def on_thing_happened(self, widget):
pass
To create a child attribute that is retrieved from your template,
add this to your class definition::
@GtkTemplate(ui='foo.ui')
class Foo(Gtk.Box):
widget = GtkTemplate.Child()
Note: This is implemented as a class decorator, but if it were
included with PyGI I suspect it might be better to do this
in the GObject metaclass (or similar) so that init_template
can be called automatically instead of forcing the user to do it.
.. note:: Due to limitations in PyGObject, you may not inherit from
python objects that use the GtkTemplate decorator.
'''
__ui_path__ = None
@staticmethod
def Callback(f):
'''
Decorator that designates a method to be attached to a signal from
the template
'''
f._gtk_callback = True
return f
Child = _Child
@staticmethod
def set_ui_path(*path):
'''
If using file paths instead of resources, call this *before*
loading anything that uses GtkTemplate, or it will fail to load
your template file
:param path: one or more path elements, will be joined together
to create the final path
TODO: Alternatively, could wait until first class instantiation
before registering templates? Would need a metaclass...
'''
_GtkTemplate.__ui_path__ = abspath(join(*path))
def __init__(self, ui):
self.ui = ui
def __call__(self, cls):
if not issubclass(cls, Gtk.Widget):
raise TypeError("Can only use @GtkTemplate on Widgets")
# Nested templates don't work
if hasattr(cls, '__gtemplate_methods__'):
raise TypeError("Cannot nest template classes")
# Load the template either from a resource path or a file
# - Prefer the resource path first
try:
template_bytes = Gio.resources_lookup_data(self.ui, Gio.ResourceLookupFlags.NONE)
except GLib.GError:
ui = self.ui
if isinstance(ui, (list, tuple)):
ui = join(ui)
if _GtkTemplate.__ui_path__ is not None:
ui = join(_GtkTemplate.__ui_path__, ui)
with open(ui, 'rb') as fp:
template_bytes = GLib.Bytes.new(fp.read())
_register_template(cls, template_bytes)
return cls
# Future shim support if this makes it into PyGI?
#if hasattr(Gtk, 'GtkTemplate'):
# GtkTemplate = lambda c: c
#else:
GtkTemplate = _GtkTemplate
| gpl-3.0 | 5,541,414,369,527,608,000 | 32.553114 | 93 | 0.61845 | false |
munkiat/libcloud | libcloud/dns/drivers/gandi.py | 27 | 8835 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
__all__ = [
'GandiDNSDriver'
]
from libcloud.common.gandi import BaseGandiDriver, GandiConnection
from libcloud.common.gandi import GandiResponse
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import RecordError
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
TTL_MIN = 30
TTL_MAX = 2592000 # 30 days
class NewZoneVersion(object):
"""
Changes to a zone in the Gandi DNS service need to be wrapped in a new
version object. The changes are made to the new version, then that
version is made active.
In effect, this is a transaction.
Any calls made inside this context manager will be applied to a new version
id. If your changes are successful (and only if they are successful) they
are activated.
"""
def __init__(self, driver, zone):
self.driver = driver
self.connection = driver.connection
self.zone = zone
def __enter__(self):
zid = int(self.zone.id)
self.connection.set_context({'zone_id': self.zone.id})
vid = self.connection.request('domain.zone.version.new', zid).object
self.vid = vid
return vid
def __exit__(self, type, value, traceback):
if not traceback:
zid = int(self.zone.id)
con = self.connection
con.set_context({'zone_id': self.zone.id})
con.request('domain.zone.version.set', zid, self.vid).object
class GandiDNSResponse(GandiResponse):
exceptions = {
581042: ZoneDoesNotExistError,
}
class GandiDNSConnection(GandiConnection):
responseCls = GandiDNSResponse
class GandiDNSDriver(BaseGandiDriver, DNSDriver):
"""
API reference can be found at:
http://doc.rpc.gandi.net/domain/reference.html
"""
type = Provider.GANDI
name = 'Gandi DNS'
website = 'http://www.gandi.net/domain'
connectionCls = GandiDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.LOC: 'LOC',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.SPF: 'SPF',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
RecordType.WKS: 'WKS',
}
def _to_zone(self, zone):
return Zone(
id=str(zone['id']),
domain=zone['name'],
type='master',
ttl=0,
driver=self,
extra={}
)
def _to_zones(self, zones):
ret = []
for z in zones:
ret.append(self._to_zone(z))
return ret
def list_zones(self):
zones = self.connection.request('domain.zone.list')
return self._to_zones(zones.object)
def get_zone(self, zone_id):
zid = int(zone_id)
self.connection.set_context({'zone_id': zone_id})
zone = self.connection.request('domain.zone.info', zid)
return self._to_zone(zone.object)
def create_zone(self, domain, type='master', ttl=None, extra=None):
params = {
'name': domain,
}
info = self.connection.request('domain.zone.create', params)
return self._to_zone(info.object)
def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
zid = int(zone.id)
params = {'name': domain}
self.connection.set_context({'zone_id': zone.id})
zone = self.connection.request('domain.zone.update', zid, params)
return self._to_zone(zone.object)
def delete_zone(self, zone):
zid = int(zone.id)
self.connection.set_context({'zone_id': zone.id})
res = self.connection.request('domain.zone.delete', zid)
return res.object
def _to_record(self, record, zone):
return Record(
id='%s:%s' % (record['type'], record['name']),
name=record['name'],
type=self._string_to_record_type(record['type']),
data=record['value'],
zone=zone,
driver=self,
extra={'ttl': record['ttl']}
)
def _to_records(self, records, zone):
retval = []
for r in records:
retval.append(self._to_record(r, zone))
return retval
def list_records(self, zone):
zid = int(zone.id)
self.connection.set_context({'zone_id': zone.id})
records = self.connection.request('domain.zone.record.list', zid, 0)
return self._to_records(records.object, zone)
def get_record(self, zone_id, record_id):
zid = int(zone_id)
record_type, name = record_id.split(':', 1)
filter_opts = {
'name': name,
'type': record_type
}
self.connection.set_context({'zone_id': zone_id})
records = self.connection.request('domain.zone.record.list',
zid, 0, filter_opts).object
if len(records) == 0:
raise RecordDoesNotExistError(value='', driver=self,
record_id=record_id)
return self._to_record(records[0], self.get_zone(zone_id))
def _validate_record(self, record_id, name, record_type, data, extra):
if len(data) > 1024:
raise RecordError('Record data must be <= 1024 characters',
driver=self, record_id=record_id)
if extra and 'ttl' in extra:
if extra['ttl'] < TTL_MIN:
raise RecordError('TTL must be at least 30 seconds',
driver=self, record_id=record_id)
if extra['ttl'] > TTL_MAX:
raise RecordError('TTL must not excdeed 30 days',
driver=self, record_id=record_id)
def create_record(self, name, zone, type, data, extra=None):
self._validate_record(None, name, type, data, extra)
zid = int(zone.id)
create = {
'name': name,
'type': self.RECORD_TYPE_MAP[type],
'value': data
}
if 'ttl' in extra:
create['ttl'] = extra['ttl']
with NewZoneVersion(self, zone) as vid:
con = self.connection
con.set_context({'zone_id': zone.id})
rec = con.request('domain.zone.record.add',
zid, vid, create).object
return self._to_record(rec, zone)
def update_record(self, record, name, type, data, extra):
self._validate_record(record.id, name, type, data, extra)
filter_opts = {
'name': record.name,
'type': self.RECORD_TYPE_MAP[record.type]
}
update = {
'name': name,
'type': self.RECORD_TYPE_MAP[type],
'value': data
}
if 'ttl' in extra:
update['ttl'] = extra['ttl']
zid = int(record.zone.id)
with NewZoneVersion(self, record.zone) as vid:
con = self.connection
con.set_context({'zone_id': record.zone.id})
con.request('domain.zone.record.delete',
zid, vid, filter_opts)
res = con.request('domain.zone.record.add',
zid, vid, update).object
return self._to_record(res, record.zone)
def delete_record(self, record):
zid = int(record.zone.id)
filter_opts = {
'name': record.name,
'type': self.RECORD_TYPE_MAP[record.type]
}
with NewZoneVersion(self, record.zone) as vid:
con = self.connection
con.set_context({'zone_id': record.zone.id})
count = con.request('domain.zone.record.delete',
zid, vid, filter_opts).object
if count == 1:
return True
raise RecordDoesNotExistError(value='No such record', driver=self,
record_id=record.id)
| apache-2.0 | -658,542,503,280,973,000 | 31.722222 | 79 | 0.580192 | false |
proxysh/Safejumper-for-Mac | buildmac/Resources/env/lib/python2.7/site-packages/twisted/scripts/test/test_scripts.py | 12 | 4880 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the command-line scripts in the top-level I{bin/} directory.
Tests for actual functionality belong elsewhere, written in a way that doesn't
involve launching child processes.
"""
from os import devnull, getcwd, chdir
from sys import executable
from subprocess import PIPE, Popen
from twisted.trial.unittest import SkipTest, TestCase
from twisted.python.modules import getModule
from twisted.python.filepath import FilePath
from twisted.python.test.test_shellcomp import ZshScriptTestMixin
def outputFromPythonScript(script, *args):
"""
Synchronously run a Python script, with the same Python interpreter that
ran the process calling this function, using L{Popen}, using the given
command-line arguments, with standard input and standard error both
redirected to L{os.devnull}, and return its output as a string.
@param script: The path to the script.
@type script: L{FilePath}
@param args: The command-line arguments to follow the script in its
invocation (the desired C{sys.argv[1:]}).
@type args: L{tuple} of L{str}
@return: the output passed to the proces's C{stdout}, without any messages
from C{stderr}.
@rtype: L{bytes}
"""
with open(devnull, "rb") as nullInput, open(devnull, "wb") as nullError:
process = Popen(
[executable, script.path] + list(args),
stdout=PIPE, stderr=nullError, stdin=nullInput)
stdout = process.communicate()[0]
return stdout
class ScriptTestsMixin(object):
"""
Mixin for L{TestCase} subclasses which defines a helper function for testing
a Twisted-using script.
"""
bin = getModule("twisted").pathEntry.filePath.child("bin")
def scriptTest(self, name):
"""
Verify that the given script runs and uses the version of Twisted
currently being tested.
This only works when running tests against a vcs checkout of Twisted,
since it relies on the scripts being in the place they are kept in
version control, and exercises their logic for finding the right version
of Twisted to use in that situation.
@param name: A path fragment, relative to the I{bin} directory of a
Twisted source checkout, identifying a script to test.
@type name: C{str}
@raise SkipTest: if the script is not where it is expected to be.
"""
script = self.bin.preauthChild(name)
if not script.exists():
raise SkipTest(
"Script tests do not apply to installed configuration.")
from twisted.copyright import version
scriptVersion = outputFromPythonScript(script, '--version')
self.assertIn(str(version), scriptVersion)
class ScriptTests(TestCase, ScriptTestsMixin):
"""
Tests for the core scripts.
"""
def test_twistd(self):
self.scriptTest("twistd")
def test_twistdPathInsert(self):
"""
The twistd script adds the current working directory to sys.path so
that it's able to import modules from it.
"""
script = self.bin.child("twistd")
if not script.exists():
raise SkipTest(
"Script tests do not apply to installed configuration.")
cwd = getcwd()
self.addCleanup(chdir, cwd)
testDir = FilePath(self.mktemp())
testDir.makedirs()
chdir(testDir.path)
testDir.child("bar.tac").setContent(
"import sys\n"
"print sys.path\n")
output = outputFromPythonScript(script, '-ny', 'bar.tac')
self.assertIn(repr(testDir.path), output)
def test_trial(self):
self.scriptTest("trial")
def test_trialPathInsert(self):
"""
The trial script adds the current working directory to sys.path so that
it's able to import modules from it.
"""
script = self.bin.child("trial")
if not script.exists():
raise SkipTest(
"Script tests do not apply to installed configuration.")
cwd = getcwd()
self.addCleanup(chdir, cwd)
testDir = FilePath(self.mktemp())
testDir.makedirs()
chdir(testDir.path)
testDir.child("foo.py").setContent("")
output = outputFromPythonScript(script, 'foo')
self.assertIn("PASSED", output)
def test_pyhtmlizer(self):
self.scriptTest("pyhtmlizer")
class ZshIntegrationTests(TestCase, ZshScriptTestMixin):
"""
Test that zsh completion functions are generated without error
"""
generateFor = [('twistd', 'twisted.scripts.twistd.ServerOptions'),
('trial', 'twisted.scripts.trial.Options'),
('pyhtmlizer', 'twisted.scripts.htmlizer.Options'),
]
| gpl-2.0 | 1,327,340,657,072,518,400 | 31.972973 | 80 | 0.649385 | false |
gangadharkadam/saloon_erp_install | erpnext/setup/doctype/company/company.py | 3 | 9857 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os
from frappe import _
from frappe.utils import cint
import frappe.defaults
from frappe.model.document import Document
class Company(Document):
def onload(self):
self.get("__onload").transactions_exist = self.check_if_transactions_exist()
def check_if_transactions_exist(self):
exists = False
for doctype in ["Sales Invoice", "Delivery Note", "Sales Order", "Quotation",
"Purchase Invoice", "Purchase Receipt", "Purchase Order", "Supplier Quotation"]:
if frappe.db.sql("""select name from `tab%s` where company=%s and docstatus=1
limit 1""" % (doctype, "%s"), self.name):
exists = True
break
return exists
def validate(self):
self.abbr = self.abbr.strip()
if self.get('__islocal') and len(self.abbr) > 5:
frappe.throw(_("Abbreviation cannot have more than 5 characters"))
if not self.abbr.strip():
frappe.throw(_("Abbreviation is mandatory"))
self.validate_default_accounts()
self.validate_currency()
def validate_default_accounts(self):
for field in ["default_bank_account", "default_cash_account", "default_receivable_account", "default_payable_account",
"default_expense_account", "default_income_account", "stock_received_but_not_billed",
"stock_adjustment_account", "expenses_included_in_valuation"]:
if self.get(field):
for_company = frappe.db.get_value("Account", self.get(field), "company")
if for_company != self.name:
frappe.throw(_("Account {0} does not belong to company: {1}")
.format(self.get(field), self.name))
def validate_currency(self):
self.previous_default_currency = frappe.db.get_value("Company", self.name, "default_currency")
if self.default_currency and self.previous_default_currency and \
self.default_currency != self.previous_default_currency and \
self.check_if_transactions_exist():
frappe.throw(_("Cannot change company's default currency, because there are existing transactions. Transactions must be cancelled to change the default currency."))
def on_update(self):
if not frappe.db.sql("""select name from tabAccount
where company=%s and docstatus<2 limit 1""", self.name):
self.create_default_accounts()
self.create_default_warehouses()
self.install_country_fixtures()
if not frappe.db.get_value("Cost Center", {"is_group": 0, "company": self.name}):
self.create_default_cost_center()
self.set_default_accounts()
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
frappe.clear_cache()
# frappe.db.sql("update `tabCompany` set company = '%s' where company_name = '%s' "%(self.name, self.name))
def install_country_fixtures(self):
path = os.path.join(os.path.dirname(__file__), "fixtures", self.country.lower())
if os.path.exists(path.encode("utf-8")):
frappe.get_attr("erpnext.setup.doctype.company.fixtures.{0}.install".format(self.country.lower()))(self)
def create_default_warehouses(self):
for whname in (_("Stores"), _("Work In Progress"), _("Finished Goods")):
if not frappe.db.exists("Warehouse", whname + " - " + self.abbr):
stock_group = frappe.db.get_value("Account", {"account_type": "Stock",
"is_group": 1, "company": self.name})
if stock_group:
frappe.get_doc({
"doctype":"Warehouse",
"warehouse_name": whname,
"company": self.name,
"create_account_under": stock_group
}).insert()
def create_default_accounts(self):
if not self.chart_of_accounts:
self.chart_of_accounts = "Standard"
from erpnext.accounts.doctype.account.chart_of_accounts.chart_of_accounts import create_charts
create_charts(self.chart_of_accounts, self.name)
frappe.db.set(self, "default_receivable_account", frappe.db.get_value("Account",
{"company": self.name, "account_type": "Receivable"}))
frappe.db.set(self, "default_payable_account", frappe.db.get_value("Account",
{"company": self.name, "account_type": "Payable"}))
def add_acc(self, lst):
account = frappe.get_doc({
"doctype": "Account",
"freeze_account": "No",
"company": self.name
})
for d in self.fld_dict.keys():
account.set(d, (d == 'parent_account' and lst[self.fld_dict[d]]) and lst[self.fld_dict[d]] +' - '+ self.abbr or lst[self.fld_dict[d]])
if not account.parent_account:
account.flags.ignore_mandatory = True
account.insert()
def set_default_accounts(self):
self._set_default_account("default_cash_account", "Cash")
self._set_default_account("default_bank_account", "Bank")
self._set_default_account("round_off_account", "Round Off")
if cint(frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock")):
self._set_default_account("stock_received_but_not_billed", "Stock Received But Not Billed")
self._set_default_account("stock_adjustment_account", "Stock Adjustment")
self._set_default_account("expenses_included_in_valuation", "Expenses Included In Valuation")
self._set_default_account("default_expense_account", "Cost of Goods Sold")
if not self.default_income_account:
self.db_set("default_income_account", frappe.db.get_value("Account",
{"account_name": _("Sales"), "company": self.name}))
def _set_default_account(self, fieldname, account_type):
if self.get(fieldname):
return
account = frappe.db.get_value("Account", {"account_type": account_type,
"is_group": 0, "company": self.name})
if account:
self.db_set(fieldname, account)
def create_default_cost_center(self):
cc_list = [
{
'cost_center_name': self.name,
'company':self.name,
'is_group': 1,
'parent_cost_center':None
},
{
'cost_center_name':_('Main'),
'company':self.name,
'is_group':0,
'parent_cost_center':self.name + ' - ' + self.abbr
},
]
for cc in cc_list:
cc.update({"doctype": "Cost Center"})
cc_doc = frappe.get_doc(cc)
cc_doc.flags.ignore_permissions = True
if cc.get("cost_center_name") == self.name:
cc_doc.flags.ignore_mandatory = True
cc_doc.insert()
frappe.db.set(self, "cost_center", _("Main") + " - " + self.abbr)
frappe.db.set(self, "round_off_cost_center", _("Main") + " - " + self.abbr)
def before_rename(self, olddn, newdn, merge=False):
if merge:
frappe.throw(_("Sorry, companies cannot be merged"))
def after_rename(self, olddn, newdn, merge=False):
frappe.db.set(self, "company_name", newdn)
frappe.db.sql("""update `tabDefaultValue` set defvalue=%s
where defkey='Company' and defvalue=%s""", (newdn, olddn))
frappe.defaults.clear_cache()
def on_trash(self):
"""
Trash accounts and cost centers for this company if no gl entry exists
"""
accounts = frappe.db.sql_list("select name from tabAccount where company=%s", self.name)
cost_centers = frappe.db.sql_list("select name from `tabCost Center` where company=%s", self.name)
warehouses = frappe.db.sql_list("select name from tabWarehouse where company=%s", self.name)
rec = frappe.db.sql("SELECT name from `tabGL Entry` where company = %s", self.name)
if not rec:
# delete Account
frappe.db.sql("delete from `tabAccount` where company = %s", self.name)
# delete cost center child table - budget detail
frappe.db.sql("""delete bd.* from `tabBudget Detail` bd, `tabCost Center` cc
where bd.parent = cc.name and cc.company = %s""", self.name)
#delete cost center
frappe.db.sql("delete from `tabCost Center` WHERE company = %s", self.name)
# delete account from customer and supplier
frappe.db.sql("delete from `tabParty Account` where company=%s", self.name)
if not frappe.db.get_value("Stock Ledger Entry", {"company": self.name}):
frappe.db.sql("""delete from `tabWarehouse` where company=%s""", self.name)
frappe.defaults.clear_default("company", value=self.name)
# clear default accounts, warehouses from item
if warehouses:
for f in ["default_warehouse", "website_warehouse"]:
frappe.db.sql("""update tabItem set %s=NULL where %s in (%s)"""
% (f, f, ', '.join(['%s']*len(warehouses))), tuple(warehouses))
frappe.db.sql("""delete from `tabItem Reorder` where warehouse in (%s)"""
% ', '.join(['%s']*len(warehouses)), tuple(warehouses))
for f in ["income_account", "expense_account"]:
frappe.db.sql("""update tabItem set %s=NULL where %s in (%s)"""
% (f, f, ', '.join(['%s']*len(accounts))), tuple(accounts))
for f in ["selling_cost_center", "buying_cost_center"]:
frappe.db.sql("""update tabItem set %s=NULL where %s in (%s)"""
% (f, f, ', '.join(['%s']*len(cost_centers))), tuple(cost_centers))
# reset default company
frappe.db.sql("""update `tabSingles` set value=""
where doctype='Global Defaults' and field='default_company'
and value=%s""", self.name)
@frappe.whitelist()
def replace_abbr(company, old, new):
new = new.strip()
if not new:
frappe.throw(_("Abbr can not be blank or space"))
frappe.only_for("System Manager")
frappe.db.set_value("Company", company, "abbr", new)
def _rename_record(dt):
for d in frappe.db.sql("select name from `tab%s` where company=%s" % (dt, '%s'), company):
parts = d[0].rsplit(" - ", 1)
if len(parts) == 1 or parts[1].lower() == old.lower():
frappe.rename_doc(dt, d[0], parts[0] + " - " + new)
for dt in ["Account", "Cost Center", "Warehouse"]:
_rename_record(dt)
frappe.db.commit()
def get_name_with_abbr(name, company):
company_abbr = frappe.db.get_value("Company", company, "abbr")
parts = name.split(" - ")
if parts[-1].lower() != company_abbr.lower():
parts.append(company_abbr)
return " - ".join(parts)
def get_company_currency(company):
return frappe.local_cache("company_currency", company,
lambda: frappe.db.get_value("Company", company, "default_currency"))
| agpl-3.0 | -1,779,982,930,998,499,300 | 36.196226 | 168 | 0.677792 | false |
alexsavio/palladium | examples/julia/config.py | 1 | 1473 | {
'dataset_loader_train': {
'__factory__': 'palladium.dataset.Table',
'path': 'iris.data',
'names': [
'sepal length',
'sepal width',
'petal length',
'petal width',
'species',
],
'target_column': 'species',
'sep': ',',
'nrows': 100,
'converters': {'species': lambda x: 1 if x == 'Iris-setosa' else -1},
},
'dataset_loader_test': {
'__factory__': 'palladium.dataset.Table',
'path': 'iris.data',
'names': [
'sepal length',
'sepal width',
'petal length',
'petal width',
'species',
],
'target_column': 'species',
'sep': ',',
'skiprows': 100,
'converters': {'species': lambda x: 1 if x == 'Iris-setosa' else -1},
},
'model': {
'__factory__': 'palladium.julia.ClassificationModel',
'fit_func': 'SVM.svm',
'predict_func': 'SVM.predict',
},
'model_persister': {
'__factory__': 'palladium.persistence.Database',
'url': 'sqlite:///iris-model.db',
},
'predict_service': {
'__factory__': 'palladium.server.PredictService',
'mapping': [
('sepal length', 'float'),
('sepal width', 'float'),
('petal length', 'float'),
('petal width', 'float'),
],
},
}
| apache-2.0 | -90,306,137,483,534,670 | 26.277778 | 77 | 0.433809 | false |
indictranstech/reciphergroup-erpnext | erpnext/projects/doctype/time_log/time_log.py | 3 | 10048 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr, flt, get_datetime, get_time, getdate
from dateutil.relativedelta import relativedelta
from erpnext.manufacturing.doctype.manufacturing_settings.manufacturing_settings import get_mins_between_operations
class OverlapError(frappe.ValidationError): pass
class OverProductionLoggedError(frappe.ValidationError): pass
class NotSubmittedError(frappe.ValidationError): pass
class NegativeHoursError(frappe.ValidationError): pass
from frappe.model.document import Document
class TimeLog(Document):
def validate(self):
self.set_status()
self.set_title()
self.validate_overlap()
self.validate_timings()
self.calculate_total_hours()
self.validate_time_log_for()
self.check_workstation_timings()
self.validate_production_order()
self.validate_manufacturing()
self.validate_task()
self.update_cost()
def on_submit(self):
self.update_production_order()
self.update_task()
def on_cancel(self):
self.update_production_order()
self.update_task()
def before_update_after_submit(self):
self.set_status()
def before_cancel(self):
self.set_status()
def set_status(self):
self.status = {
0: "Draft",
1: "Submitted",
2: "Cancelled"
}[self.docstatus or 0]
if self.time_log_batch:
self.status="Batched for Billing"
if self.sales_invoice:
self.status="Billed"
def set_title(self):
if self.production_order:
self.title = _("{0} for {1}").format(self.operation, self.production_order)
elif self.task:
self.title = _("{0} for {1}").format(self.activity_type, self.task)
elif self.project:
self.title = _("{0} for {1}").format(self.activity_type, self.project)
else:
self.title = self.activity_type
def validate_overlap(self):
"""Checks if 'Time Log' entries overlap for a user, workstation. """
self.validate_overlap_for("user")
self.validate_overlap_for("employee")
self.validate_overlap_for("workstation")
def validate_overlap_for(self, fieldname):
existing = self.get_overlap_for(fieldname)
if existing:
frappe.throw(_("This Time Log conflicts with {0} for {1} {2}").format(existing.name,
self.meta.get_label(fieldname), self.get(fieldname)), OverlapError)
def get_overlap_for(self, fieldname):
if not self.get(fieldname):
return
existing = frappe.db.sql("""select name, from_time, to_time from `tabTime Log`
where `{0}`=%(val)s and
(
(from_time > %(from_time)s and from_time < %(to_time)s) or
(to_time > %(from_time)s and to_time < %(to_time)s) or
(%(from_time)s > from_time and %(from_time)s < to_time) or
(%(from_time)s = from_time and %(to_time)s = to_time))
and name!=%(name)s
and ifnull(task, "")=%(task)s
and docstatus < 2""".format(fieldname),
{
"val": self.get(fieldname),
"from_time": self.from_time,
"to_time": self.to_time,
"name": self.name or "No Name",
"task": cstr(self.task)
}, as_dict=True)
return existing[0] if existing else None
def validate_timings(self):
if self.to_time and self.from_time and get_datetime(self.to_time) <= get_datetime(self.from_time):
frappe.throw(_("To Time must be greater than From Time"), NegativeHoursError)
def calculate_total_hours(self):
if self.to_time and self.from_time:
from frappe.utils import time_diff_in_seconds
self.hours = flt(time_diff_in_seconds(self.to_time, self.from_time)) / 3600
def validate_time_log_for(self):
if not self.for_manufacturing:
for fld in ["production_order", "operation", "workstation", "completed_qty"]:
self.set(fld, None)
else:
self.activity_type=None
def check_workstation_timings(self):
"""Checks if **Time Log** is between operating hours of the **Workstation**."""
if self.workstation and self.from_time and self.to_time:
from erpnext.manufacturing.doctype.workstation.workstation import check_if_within_operating_hours
check_if_within_operating_hours(self.workstation, self.operation, self.from_time, self.to_time)
def validate_production_order(self):
"""Throws 'NotSubmittedError' if **production order** is not submitted. """
if self.production_order:
if frappe.db.get_value("Production Order", self.production_order, "docstatus") != 1 :
frappe.throw(_("You can make a time log only against a submitted production order"), NotSubmittedError)
def update_production_order(self):
"""Updates `start_date`, `end_date`, `status` for operation in Production Order."""
if self.production_order and self.for_manufacturing:
if not self.operation_id:
frappe.throw(_("Operation ID not set"))
dates = self.get_operation_start_end_time()
summary = self.get_time_log_summary()
pro = frappe.get_doc("Production Order", self.production_order)
for o in pro.operations:
if o.name == self.operation_id:
o.actual_start_time = dates.start_date
o.actual_end_time = dates.end_date
o.completed_qty = summary.completed_qty
o.actual_operation_time = summary.mins
break
pro.flags.ignore_validate_update_after_submit = True
pro.update_operation_status()
pro.calculate_operating_cost()
pro.set_actual_dates()
pro.save()
def get_operation_start_end_time(self):
"""Returns Min From and Max To Dates of Time Logs against a specific Operation. """
return frappe.db.sql("""select min(from_time) as start_date, max(to_time) as end_date from `tabTime Log`
where production_order = %s and operation = %s and docstatus=1""",
(self.production_order, self.operation), as_dict=1)[0]
def move_to_next_day(self):
"""Move start and end time one day forward"""
self.from_time = get_datetime(self.from_time) + relativedelta(day=1)
def move_to_next_working_slot(self):
"""Move to next working slot from workstation"""
workstation = frappe.get_doc("Workstation", self.workstation)
slot_found = False
for working_hour in workstation.working_hours:
if get_datetime(self.from_time).time() < get_time(working_hour.start_time):
self.from_time = getdate(self.from_time).strftime("%Y-%m-%d") + " " + working_hour.start_time
slot_found = True
break
if not slot_found:
# later than last time
self.from_time = getdate(self.from_time).strftime("%Y-%m-%d") + " " + workstation.working_hours[0].start_time
self.move_to_next_day()
def move_to_next_non_overlapping_slot(self):
"""If in overlap, set start as the end point of the overlapping time log"""
overlapping = self.get_overlap_for("workstation") \
or self.get_overlap_for("employee") \
or self.get_overlap_for("user")
if not overlapping:
frappe.throw("Logical error: Must find overlapping")
self.from_time = get_datetime(overlapping.to_time) + get_mins_between_operations()
def get_time_log_summary(self):
"""Returns 'Actual Operating Time'. """
return frappe.db.sql("""select
sum(hours*60) as mins, sum(ifnull(completed_qty, 0)) as completed_qty
from `tabTime Log`
where production_order = %s and operation_id = %s and docstatus=1""",
(self.production_order, self.operation_id), as_dict=1)[0]
def validate_manufacturing(self):
if self.for_manufacturing:
if not self.production_order:
frappe.throw(_("Production Order is Mandatory"))
if not self.completed_qty:
self.completed_qty = 0
production_order = frappe.get_doc("Production Order", self.production_order)
pending_qty = flt(production_order.qty) - flt(production_order.produced_qty)
if flt(self.completed_qty) > pending_qty:
frappe.throw(_("Completed Qty cannot be more than {0} for operation {1}").format(pending_qty, self.operation),
OverProductionLoggedError)
else:
self.production_order = None
self.operation = None
self.quantity = None
def update_cost(self):
rate = get_activity_cost(self.employee, self.activity_type)
if rate:
self.costing_rate = rate.get('costing_rate')
self.billing_rate = rate.get('billing_rate')
self.costing_amount = self.costing_rate * self.hours
if self.billable:
self.billing_amount = self.billing_rate * self.hours
else:
self.billing_amount = 0
def validate_task(self):
# if a time log is being created against a project without production order
if (self.project and not self.production_order) and not self.task:
frappe.throw(_("Task is Mandatory if Time Log is against a project"))
def update_task(self):
if self.task and frappe.db.exists("Task", self.task):
task = frappe.get_doc("Task", self.task)
task.update_time_and_costing()
task.save()
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters like workstation, project etc.
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Time Log", filters)
data = frappe.db.sql("""select name, from_time, to_time,
activity_type, task, project, production_order, workstation from `tabTime Log`
where docstatus < 2 and ( from_time between %(start)s and %(end)s or to_time between %(start)s and %(end)s )
{conditions}""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
for d in data:
d.title = d.name + ": " + (d.activity_type or d.production_order or "")
if d.task:
d.title += " for Task: " + d.task
if d.project:
d.title += " for Project: " + d.project
return data
@frappe.whitelist()
def get_activity_cost(employee=None, activity_type=None):
rate = frappe.db.sql("""select costing_rate, billing_rate from `tabActivity Cost` where employee= %s
and activity_type= %s""", (employee, activity_type), as_dict=1)
if not rate:
rate = frappe.db.sql("""select costing_rate, billing_rate from `tabActivity Cost` where ifnull(employee, '')=''
and activity_type= %s""", (activity_type), as_dict=1)
return rate[0] if rate else {}
| agpl-3.0 | -3,917,239,653,648,764,400 | 35.143885 | 115 | 0.703225 | false |
eptmp3/Sick-Beard | sickbeard/postProcessor.py | 21 | 48424 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import glob
import os
import re
import shlex
import subprocess
import sickbeard
import hashlib
from sickbeard import db
from sickbeard import classes
from sickbeard import common
from sickbeard import exceptions
from sickbeard import helpers
from sickbeard import history
from sickbeard import logger
from sickbeard import notifiers
from sickbeard import show_name_helpers
from sickbeard import scene_exceptions
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class PostProcessor(object):
"""
A class which will process a media file according to the post processing settings in the config.
"""
EXISTS_LARGER = 1
EXISTS_SAME = 2
EXISTS_SMALLER = 3
DOESNT_EXIST = 4
IGNORED_FILESTRINGS = [ "/.AppleDouble/", ".DS_Store" ]
NZB_NAME = 1
FOLDER_NAME = 2
FILE_NAME = 3
def __init__(self, file_path, nzb_name = None):
"""
Creates a new post processor with the given file path and optionally an NZB name.
file_path: The path to the file to be processed
nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
"""
# absolute path to the folder that is being processed
self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path))
# full path to file
self.file_path = file_path
# file name only
self.file_name = ek.ek(os.path.basename, file_path)
# the name of the folder only
self.folder_name = ek.ek(os.path.basename, self.folder_path)
# name of the NZB that resulted in this folder
self.nzb_name = nzb_name
self.in_history = False
self.release_group = None
self.is_proper = False
self.good_results = {self.NZB_NAME: False,
self.FOLDER_NAME: False,
self.FILE_NAME: False}
self.log = ''
def _log(self, message, level=logger.MESSAGE):
"""
A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.
message: The string to log (unicode)
level: The log level to use (optional)
"""
logger.log(message, level)
self.log += message + '\n'
def _checkForExistingFile(self, existing_file):
"""
Checks if a file exists already and if it does whether it's bigger or smaller than
the file we are post processing
existing_file: The file to compare to
Returns:
DOESNT_EXIST if the file doesn't exist
EXISTS_LARGER if the file exists and is larger than the file we are post processing
EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
EXISTS_SAME if the file exists and is the same size as the file we are post processing
"""
if not existing_file:
self._log(u"There is no existing file so there's no worries about replacing it", logger.DEBUG)
return PostProcessor.DOESNT_EXIST
# if the new file exists, return the appropriate code depending on the size
if ek.ek(os.path.isfile, existing_file):
# see if it's bigger than our old file
if ek.ek(os.path.getsize, existing_file) > ek.ek(os.path.getsize, self.file_path):
self._log(u"File "+existing_file+" is larger than "+self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_LARGER
elif ek.ek(os.path.getsize, existing_file) == ek.ek(os.path.getsize, self.file_path):
self._log(u"File "+existing_file+" is the same size as "+self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_SAME
else:
self._log(u"File "+existing_file+" is smaller than "+self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_SMALLER
else:
self._log(u"File "+existing_file+" doesn't exist so there's no worries about replacing it", logger.DEBUG)
return PostProcessor.DOESNT_EXIST
def _list_associated_files(self, file_path, subtitles_only=False):
"""
For a given file path searches for files with the same name but different extension and returns their absolute paths
file_path: The file to check for associated files
Returns: A list containing all files which are associated to the given file
"""
if not file_path:
return []
file_path_list = []
if subtitles_only:
head, tail = os.path.split(subtitles_only)
base_tail = tail.rpartition('.')[0]+'.'
base_name = os.path.join(file_path,base_tail)
else:
base_name = file_path.rpartition('.')[0]+'.'
# don't strip it all and use cwd by accident
if not base_name:
return []
# don't confuse glob with chars we didn't mean to use
base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)
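        # e.g. (illustrative) a base name like "Show [2010].S01E01." becomes
        # "Show [[]2010[]].S01E01." so glob treats the brackets as literal characters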
for associated_file_path in ek.ek(glob.glob, base_name+'*'):
            # skip the file itself; we only want its associated files
if associated_file_path == file_path:
continue
# only list it if the only non-shared part is the extension or if it is a subtitle
if '.' in associated_file_path[len(base_name):] and not associated_file_path[len(associated_file_path)-3:] in common.subtitleExtensions:
continue
if subtitles_only and not associated_file_path[len(associated_file_path)-3:] in common.subtitleExtensions:
continue
file_path_list.append(associated_file_path)
return file_path_list
    def _list_dummy_files(self, file_path, oribasename=None, directory=None):
"""
        For a given file path, searches the containing folder for leftover "dummy"
        files (anything that is not the media file itself, an associated file, or
        another media file we want to keep) and deletes them.
        Returns: nothing; the dummy files are deleted in place
"""
if not file_path:
return []
        dumb_files_list = []
        if oribasename:
            # the base name handed down by a recursive call is already glob-escaped
            base_name = oribasename
        else:
            base_name = file_path.rpartition('.')[0] + '.'
            # don't strip it all and use cwd by accident
            if not base_name:
                return []
            # don't confuse glob with chars we didn't mean to use
            base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)
        if directory == "d":
            cur_dir = file_path
        else:
            cur_dir = self.folder_path
        ass_files = ek.ek(glob.glob, base_name + '*')
        dum_files = ek.ek(glob.glob, ek.ek(os.path.join, cur_dir, '*'))
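        # everything in the processing folder that is neither the media file itself, a
        # media file we want to keep, nor an associated file gets queued for deletion
        # (illustrative examples: leftover .nfo, .sfv or sample .txt files)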
for dummy_file_path in dum_files:
if os.path.isdir(dummy_file_path):
self._list_dummy_files(dummy_file_path, base_name,"d")
            elif (dummy_file_path == self.file_path
                  or dummy_file_path.rpartition('.')[-1] in common.mediaExtensions
                  or sickbeard.MOVE_ASSOCIATED_FILES
                  or (sickbeard.TORRENT_DOWNLOAD_DIR != "" and sickbeard.TORRENT_DOWNLOAD_DIR in cur_dir
                      and sickbeard.PROCESS_METHOD in ['copy', 'hardlink', 'symlink'])):
                continue
else:
dumb_files_list.append(dummy_file_path)
for cur_file in dumb_files_list:
self._log(u"Deleting file "+cur_file, logger.DEBUG)
if ek.ek(os.path.isfile, cur_file):
ek.ek(os.remove, cur_file)
return
def _delete(self, file_path, associated_files=False):
"""
Deletes the file and optionally all associated files.
file_path: The file to delete
associated_files: True to delete all files which differ only by extension, False to leave them
"""
if not file_path:
return
# figure out which files we want to delete
file_list = [file_path]
self._list_dummy_files(file_path)
if associated_files:
file_list = file_list + self._list_associated_files(file_path)
if not file_list:
self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG)
return
# delete the file and any other files which we want to delete
for cur_file in file_list:
self._log(u"Deleting file "+cur_file, logger.DEBUG)
if ek.ek(os.path.isfile, cur_file):
ek.ek(os.remove, cur_file)
# do the library update for synoindex
notifiers.synoindex_notifier.deleteFile(cur_file)
    def _combined_file_operation(self, file_path, new_path, new_base_name, associated_files=False, action=None, subtitles=False):
"""
Performs a generic operation (move or copy) on a file. Can rename the file as well as change its location,
and optionally move associated files too.
file_path: The full path of the media file to act on
new_path: Destination path where we want to move/copy the file to
new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
associated_files: Boolean, whether we should copy similarly-named files too
action: function that takes an old path and new path and does an operation with them (move/copy)
"""
if not action:
self._log(u"Must provide an action for the combined file operation", logger.ERROR)
return
file_list = [file_path]
self._list_dummy_files(file_path)
if associated_files:
file_list = file_list + self._list_associated_files(file_path)
elif subtitles:
file_list = file_list + self._list_associated_files(file_path, True)
if not file_list:
self._log(u"There were no files associated with " + file_path + ", not moving anything", logger.DEBUG)
return
# deal with all files
for cur_file_path in file_list:
cur_file_name = ek.ek(os.path.basename, cur_file_path)
# get the extension
cur_extension = cur_file_path.rpartition('.')[-1]
            # check whether the subtitle file name carries a language code
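            # e.g. (illustrative) "episode.en.srt" keeps "en.srt" as its extension,
            # so the language code survives the rename below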
if cur_extension in common.subtitleExtensions:
cur_lang = cur_file_path.rpartition('.')[0].rpartition('.')[-1]
if cur_lang in sickbeard.SUBTITLES_LANGUAGES:
cur_extension = cur_lang + '.' + cur_extension
# replace .nfo with .nfo-orig to avoid conflicts
if cur_extension == 'nfo':
cur_extension = 'nfo-orig'
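                # e.g. (illustrative) "episode.nfo" is carried over as "episode.nfo-orig"
                # so it doesn't clash with any metadata .nfo Sick Beard may generate later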
# If new base name then convert name
if new_base_name:
new_file_name = new_base_name +'.' + cur_extension
# if we're not renaming we still want to change extensions sometimes
else:
new_file_name = helpers.replaceExtension(cur_file_name, cur_extension)
if sickbeard.SUBTITLES_DIR and cur_extension in common.subtitleExtensions:
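                # e.g. (illustrative) with SUBTITLES_DIR set to "Subs", "episode.en.srt"
                # ends up as <destination>/Subs/<new base name>.en.srt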
subs_new_path = ek.ek(os.path.join, new_path, sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
new_file_path = ek.ek(os.path.join, subs_new_path, new_file_name)
else:
if sickbeard.SUBTITLES_DIR_SUB and cur_extension in common.subtitleExtensions:
                    subs_new_path = ek.ek(os.path.join, ek.ek(os.path.dirname, cur_file_path), "Subs")
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
new_file_path = ek.ek(os.path.join, subs_new_path, new_file_name)
                else:
new_file_path = ek.ek(os.path.join, new_path, new_file_name)
action(cur_file_path, new_file_path)
def _move(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
"""
file_path: The full path of the media file to move
new_path: Destination path where we want to move the file to
new_base_name: The base filename (no extension) to use during the move. Use None to keep the same name.
associated_files: Boolean, whether we should move similarly-named files too
"""
def _int_move(cur_file_path, new_file_path):
self._log(u"Moving file from "+cur_file_path+" to "+new_file_path, logger.DEBUG)
try:
helpers.moveFile(cur_file_path, new_file_path)
helpers.chmodAsParent(new_file_path)
except (IOError, OSError), e:
self._log("Unable to move file "+cur_file_path+" to "+new_file_path+": "+ex(e), logger.ERROR)
raise e
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move, subtitles=subtitles)
def _copy(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
"""
file_path: The full path of the media file to copy
new_path: Destination path where we want to copy the file to
new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
associated_files: Boolean, whether we should copy similarly-named files too
"""
        def _int_copy(cur_file_path, new_file_path):
self._log(u"Copying file from "+cur_file_path+" to "+new_file_path, logger.DEBUG)
try:
helpers.copyFile(cur_file_path, new_file_path)
helpers.chmodAsParent(new_file_path)
except (IOError, OSError), e:
                self._log("Unable to copy file "+cur_file_path+" to "+new_file_path+": "+ex(e), logger.ERROR)
raise e
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_copy, subtitles=subtitles)
def _hardlink(self, file_path, new_path, new_base_name, associated_files=False):
"""
file_path: The full path of the media file to move
new_path: Destination path where we want to create a hard linked file
new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
associated_files: Boolean, whether we should move similarly-named files too
"""
def _int_hard_link(cur_file_path, new_file_path):
self._log(u"Hard linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
try:
helpers.hardlinkFile(cur_file_path, new_file_path)
helpers.chmodAsParent(new_file_path)
except (IOError, OSError), e:
self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": "+ex(e), logger.ERROR)
raise e
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_hard_link)
def _moveAndSymlink(self, file_path, new_path, new_base_name, associated_files=False):
"""
file_path: The full path of the media file to move
new_path: Destination path where we want to move the file to create a symbolic link to
new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
associated_files: Boolean, whether we should move similarly-named files too
"""
def _int_move_and_sym_link(cur_file_path, new_file_path):
self._log(u"Moving then symbolic linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
try:
helpers.moveAndSymlinkFile(cur_file_path, new_file_path)
helpers.chmodAsParent(new_file_path)
except (IOError, OSError), e:
self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
raise e
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move_and_sym_link)
def _history_lookup(self):
"""
Look up the NZB name in the history and see if it contains a record for self.nzb_name
Returns a (tvdb_id, season, []) tuple. The first two may be None if none were found.
"""
to_return = (None, None, [])
# if we don't have either of these then there's nothing to use to search the history for anyway
if not self.nzb_name and not self.folder_name:
self.in_history = False
return to_return
# make a list of possible names to use in the search
names = []
if self.nzb_name:
names.append(self.nzb_name)
if '.' in self.nzb_name:
names.append(self.nzb_name.rpartition(".")[0])
if self.folder_name:
names.append(self.folder_name)
myDB = db.DBConnection()
# search the database for a possible match and return immediately if we find one
for curName in names:
sql_results = myDB.select("SELECT * FROM history WHERE resource LIKE ?", [re.sub("[\.\-\ ]", "_", curName)])
if len(sql_results) == 0:
continue
tvdb_id = int(sql_results[0]["showid"])
season = int(sql_results[0]["season"])
self.in_history = True
to_return = (tvdb_id, season, [])
self._log("Found result in history: "+str(to_return), logger.DEBUG)
if curName == self.nzb_name:
self.good_results[self.NZB_NAME] = True
elif curName == self.folder_name:
self.good_results[self.FOLDER_NAME] = True
elif curName == self.file_name:
self.good_results[self.FILE_NAME] = True
return to_return
self.in_history = False
return to_return
def _analyze_name(self, name, file=True):
"""
Takes a name and tries to figure out a show, season, and episode from it.
name: A string which we want to analyze to determine show info from (unicode)
Returns a (tvdb_id, season, [episodes]) tuple. The first two may be None and episodes may be []
if none were found.
"""
logger.log(u"Analyzing name "+repr(name))
to_return = (None, None, [])
if not name:
return to_return
# parse the name to break it into show name, season, and episode
np = NameParser(file)
parse_result = np.parse(name)
self._log("Parsed "+name+" into "+str(parse_result).decode('utf-8'), logger.DEBUG)
if parse_result.air_by_date:
season = -1
episodes = [parse_result.air_date]
else:
season = parse_result.season_number
episodes = parse_result.episode_numbers
to_return = (None, season, episodes)
# do a scene reverse-lookup to get a list of all possible names
name_list = show_name_helpers.sceneToNormalShowNames(parse_result.series_name)
if not name_list:
return (None, season, episodes)
def _finalize(parse_result):
self.release_group = parse_result.release_group
# remember whether it's a proper
if parse_result.extra_info:
self.is_proper = re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', parse_result.extra_info, re.I) != None
# if the result is complete then remember that for later
if parse_result.series_name and parse_result.season_number != None and parse_result.episode_numbers and parse_result.release_group:
test_name = os.path.basename(name)
if test_name == self.nzb_name:
self.good_results[self.NZB_NAME] = True
elif test_name == self.folder_name:
self.good_results[self.FOLDER_NAME] = True
elif test_name == self.file_name:
self.good_results[self.FILE_NAME] = True
else:
logger.log(u"Nothing was good, found "+repr(test_name)+" and wanted either "+repr(self.nzb_name)+", "+repr(self.folder_name)+", or "+repr(self.file_name))
else:
logger.log("Parse result not suficent(all folowing have to be set). will not save release name", logger.DEBUG)
logger.log("Parse result(series_name): " + str(parse_result.series_name), logger.DEBUG)
logger.log("Parse result(season_number): " + str(parse_result.season_number), logger.DEBUG)
logger.log("Parse result(episode_numbers): " + str(parse_result.episode_numbers), logger.DEBUG)
logger.log("Parse result(release_group): " + str(parse_result.release_group), logger.DEBUG)
# for each possible interpretation of that scene name
for cur_name in name_list:
self._log(u"Checking scene exceptions for a match on "+cur_name, logger.DEBUG)
scene_id = scene_exceptions.get_scene_exception_by_name(cur_name)
if scene_id:
self._log(u"Scene exception lookup got tvdb id "+str(scene_id)+u", using that", logger.DEBUG)
_finalize(parse_result)
return (scene_id, season, episodes)
# see if we can find the name directly in the DB, if so use it
for cur_name in name_list:
self._log(u"Looking up "+cur_name+u" in the DB", logger.DEBUG)
db_result = helpers.searchDBForShow(cur_name)
if db_result:
self._log(u"Lookup successful, using tvdb id "+str(db_result[0]), logger.DEBUG)
_finalize(parse_result)
return (int(db_result[0]), season, episodes)
# see if we can find the name with a TVDB lookup
for cur_name in name_list:
try:
t = tvdb_api.Tvdb(custom_ui=classes.ShowListUI, **sickbeard.TVDB_API_PARMS)
self._log(u"Looking up name "+cur_name+u" on TVDB", logger.DEBUG)
showObj = t[cur_name]
except (tvdb_exceptions.tvdb_exception):
# if none found, search on all languages
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
ltvdb_api_parms['search_all_languages'] = True
t = tvdb_api.Tvdb(custom_ui=classes.ShowListUI, **ltvdb_api_parms)
self._log(u"Looking up name "+cur_name+u" in all languages on TVDB", logger.DEBUG)
showObj = t[cur_name]
except (tvdb_exceptions.tvdb_exception, IOError):
pass
continue
except (IOError):
continue
self._log(u"Lookup successful, using tvdb id "+str(showObj["id"]), logger.DEBUG)
_finalize(parse_result)
return (int(showObj["id"]), season, episodes)
_finalize(parse_result)
return to_return
def _find_info(self):
"""
For a given file try to find the showid, season, and episode.
"""
tvdb_id = season = None
episodes = []
# try to look up the nzb in history
attempt_list = [self._history_lookup,
# try to analyze the nzb name
lambda: self._analyze_name(self.nzb_name),
# try to analyze the file name
lambda: self._analyze_name(self.file_name),
# try to analyze the dir name
lambda: self._analyze_name(self.folder_name),
# try to analyze the file+dir names together
lambda: self._analyze_name(self.file_path),
# try to analyze the dir + file name together as one name
lambda: self._analyze_name(self.folder_name + u' ' + self.file_name)
]
# attempt every possible method to get our info
for cur_attempt in attempt_list:
try:
(cur_tvdb_id, cur_season, cur_episodes) = cur_attempt()
except InvalidNameException, e:
logger.log(u"Unable to parse, skipping: "+ex(e), logger.DEBUG)
continue
# if we already did a successful history lookup then keep that tvdb_id value
if cur_tvdb_id and not (self.in_history and tvdb_id):
tvdb_id = cur_tvdb_id
if cur_season != None:
season = cur_season
if cur_episodes:
episodes = cur_episodes
# for air-by-date shows we need to look up the season/episode from tvdb
if season == -1 and tvdb_id and episodes:
self._log(u"Looks like this is an air-by-date show, attempting to convert the date to season/episode", logger.DEBUG)
# try to get language set for this show
tvdb_lang = None
try:
showObj = helpers.findCertainShow(sickbeard.showList, tvdb_id)
if(showObj != None):
tvdb_lang = showObj.lang
except exceptions.MultipleShowObjectsException:
raise #TODO: later I'll just log this, for now I want to know about it ASAP
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if tvdb_lang and not tvdb_lang == 'en':
ltvdb_api_parms['language'] = tvdb_lang
t = tvdb_api.Tvdb(**ltvdb_api_parms)
epObj = t[tvdb_id].airedOn(episodes[0])[0]
season = int(epObj["seasonnumber"])
episodes = [int(epObj["episodenumber"])]
self._log(u"Got season " + str(season) + " episodes " + str(episodes), logger.DEBUG)
except tvdb_exceptions.tvdb_episodenotfound, e:
self._log(u"Unable to find episode with date " + str(episodes[0]) + u" for show " + str(tvdb_id) + u", skipping", logger.DEBUG)
# we don't want to leave dates in the episode list if we couldn't convert them to real episode numbers
episodes = []
continue
except tvdb_exceptions.tvdb_error, e:
logger.log(u"Unable to contact TVDB: " + ex(e), logger.WARNING)
episodes = []
continue
# if there's no season then we can hopefully just use 1 automatically
elif season == None and tvdb_id:
myDB = db.DBConnection()
numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [tvdb_id])
if int(numseasonsSQlResult[0][0]) == 1 and season == None:
self._log(u"Don't have a season number, but this show appears to only have 1 season, setting seasonnumber to 1...", logger.DEBUG)
season = 1
if tvdb_id and season != None and episodes:
season, episodes = self._sceneToTVDBNumbers(tvdb_id, season, episodes)
return (tvdb_id, season, episodes)
def _sceneToTVDBNumbers(self, tvdb_id, season, episodes):
self._log(u"This looks like a scene release converting scene numbers to tvdb numbers", logger.DEBUG)
ep_obj = self._get_ep_obj(tvdb_id, season, episodes, scene=True)
if ep_obj:
newEpisodeNumbers = []
for curEp in [ep_obj] + ep_obj.relatedEps:
newEpisodeNumbers.append(curEp.episode)
return (ep_obj.season, newEpisodeNumbers)
return (season, episodes)
def _get_ep_obj(self, tvdb_id, season, episodes, scene=False):
"""
Retrieve the TVEpisode object requested.
tvdb_id: The TVDBID of the show (int)
season: The season of the episode (int)
episodes: A list of episodes to find (list of ints)
If the episode(s) can be found then a TVEpisode object with the correct related eps will
be instantiated and returned. If the episode can't be found then None will be returned.
"""
show_obj = None
sceneMsg = ""
if scene:
sceneMsg = "(scene numbers) "
self._log(u"Loading show object for tvdb_id "+str(tvdb_id), logger.DEBUG)
# find the show in the showlist
try:
show_obj = helpers.findCertainShow(sickbeard.showList, tvdb_id)
except exceptions.MultipleShowObjectsException:
raise #TODO: later I'll just log this, for now I want to know about it ASAP
# if we can't find the show then there's nothing we can really do
if not show_obj:
self._log(("This show (tvdb_id=%d) isn't in your list, you need to add it to SB before post-processing an episode" % tvdb_id), logger.ERROR)
raise exceptions.PostProcessingFailed()
root_ep = None
for cur_episode in episodes:
episode = int(cur_episode)
self._log(u"Retrieving episode object for " + sceneMsg + str(season) + "x" + str(episode), logger.DEBUG)
# now that we've figured out which episode this file is just load it manually
try:
myDB = db.DBConnection()
is_scene = myDB.select("SELECT scene_episode FROM tv_episodes WHERE showid = ? AND scene_season = ? AND scene_episode = ?", [tvdb_id, season, episode])
if is_scene and scene:
curEp = show_obj.getEpisode(season, episode, scene=True)
else:
curEp = show_obj.getEpisode(season, episode, scene=False)
except exceptions.EpisodeNotFoundException, e:
self._log(u"Unable to create episode: "+ex(e), logger.DEBUG)
raise exceptions.PostProcessingFailed()
# associate all the episodes together under a single root episode
if root_ep == None:
root_ep = curEp
if not scene:
root_ep.relatedEps = []
elif curEp not in root_ep.relatedEps:
self._log("Adding a related episode: " + str(curEp.season) + "x" + str(curEp.episode))
root_ep.relatedEps.append(curEp)
return root_ep
def _get_quality(self, ep_obj):
"""
Determines the quality of the file that is being post processed, first by checking if it is directly
available in the TVEpisode's status or otherwise by parsing through the data available.
ep_obj: The TVEpisode object related to the file we are post processing
Returns: A quality value found in common.Quality
"""
ep_quality = common.Quality.UNKNOWN
# if there is a quality available in the status then we don't need to bother guessing from the filename
if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_FRENCH:
oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) #@UnusedVariable
if ep_quality != common.Quality.UNKNOWN:
self._log(u"The old status had a quality in it, using that: "+common.Quality.qualityStrings[ep_quality], logger.DEBUG)
return ep_quality
# nzb name is the most reliable if it exists, followed by folder name and lastly file name
name_list = [self.nzb_name, self.folder_name, self.file_name]
# search all possible names for our new quality, in case the file or dir doesn't have it
for cur_name in name_list:
# some stuff might be None at this point still
if not cur_name:
continue
ep_quality = common.Quality.nameQuality(cur_name)
self._log(u"Looking up quality for name "+cur_name+u", got "+common.Quality.qualityStrings[ep_quality], logger.DEBUG)
# if we find a good one then use it
if ep_quality != common.Quality.UNKNOWN:
logger.log(cur_name+u" looks like it has quality "+common.Quality.qualityStrings[ep_quality]+", using that", logger.DEBUG)
return ep_quality
# if we didn't get a quality from one of the names above, try assuming from each of the names
ep_quality = common.Quality.assumeQuality(self.file_name)
self._log(u"Guessing quality for name "+self.file_name+u", got "+common.Quality.qualityStrings[ep_quality], logger.DEBUG)
if ep_quality != common.Quality.UNKNOWN:
logger.log(self.file_name+u" looks like it has quality "+common.Quality.qualityStrings[ep_quality]+", using that", logger.DEBUG)
return ep_quality
return ep_quality
def _run_extra_scripts(self, ep_obj):
"""
Executes any extra scripts defined in the config.
ep_obj: The object to use when calling the extra script
"""
for curScriptName in sickbeard.EXTRA_SCRIPTS:
# generate a safe command line string to execute the script and provide all the parameters
script_cmd = shlex.split(curScriptName) + [ep_obj.location, self.file_path, str(ep_obj.show.tvdbid), str(ep_obj.season), str(ep_obj.episode), str(ep_obj.airdate)]
# use subprocess to run the command and capture output
self._log(u"Executing command "+str(script_cmd))
self._log(u"Absolute path to script: "+ek.ek(os.path.abspath, script_cmd[0]), logger.DEBUG)
try:
p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
self._log(u"Script result: "+str(out), logger.DEBUG)
except OSError, e:
self._log(u"Unable to run extra_script: "+ex(e))
def _is_priority(self, ep_obj, new_ep_quality):
"""
Determines if the episode is a priority download or not (if it is expected). Episodes which are expected
(snatched) or larger than the existing episode are priority, others are not.
ep_obj: The TVEpisode object in question
new_ep_quality: The quality of the episode that is being processed
Returns: True if the episode is priority, False otherwise.
"""
# if SB downloaded this on purpose then this is a priority download
if self.in_history or ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_FRENCH:
self._log(u"SB snatched this episode so I'm marking it as priority", logger.DEBUG)
return True
        old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) #@UnusedVariable
        # if the user downloaded it manually and it's higher quality than the existing episode then it's priority
        if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
            self._log(u"This was manually downloaded but it appears to be better quality than what we have so I'm marking it as priority", logger.DEBUG)
            return True
        # if the user downloaded it manually and it appears to be a PROPER/REPACK then it's priority
        if self.is_proper and new_ep_quality >= old_ep_quality:
self._log(u"This was manually downloaded but it appears to be a proper so I'm marking it as priority", logger.DEBUG)
return True
return False
def process(self):
"""
Post-process a given file
"""
self._log(u"Processing " + self.file_path + " (" + str(self.nzb_name) + ")")
if os.path.isdir(self.file_path):
self._log(u"File " + self.file_path + " seems to be a directory")
return False
for ignore_file in self.IGNORED_FILESTRINGS:
if ignore_file in self.file_path:
self._log(u"File " + self.file_path + " is ignored type, skipping")
return False
# reset per-file stuff
self.in_history = False
# try to find the file info
(tvdb_id, season, episodes) = self._find_info()
# if we don't have it then give up
if not tvdb_id or season == None or not episodes:
return False
# retrieve/create the corresponding TVEpisode objects
ep_obj = self._get_ep_obj(tvdb_id, season, episodes)
# get the quality of the episode we're processing
new_ep_quality = self._get_quality(ep_obj)
logger.log(u"Quality of the episode we're processing: " + str(new_ep_quality), logger.DEBUG)
# see if this is a priority download (is it snatched, in history, or PROPER)
priority_download = self._is_priority(ep_obj, new_ep_quality)
self._log(u"Is ep a priority download: " + str(priority_download), logger.DEBUG)
# set the status of the episodes
for curEp in [ep_obj] + ep_obj.relatedEps:
curEp.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality)
# check for an existing file
existing_file_status = self._checkForExistingFile(ep_obj.location)
# if it's not priority then we don't want to replace smaller files in case it was a mistake
if not priority_download:
# if there's an existing file that we don't want to replace stop here
if existing_file_status in (PostProcessor.EXISTS_LARGER, PostProcessor.EXISTS_SAME):
self._log(u"File exists and we are not going to replace it because it's not smaller, quitting post-processing", logger.DEBUG)
return False
elif existing_file_status == PostProcessor.EXISTS_SMALLER:
self._log(u"File exists and is smaller than the new file so I'm going to replace it", logger.DEBUG)
elif existing_file_status != PostProcessor.DOESNT_EXIST:
self._log(u"Unknown existing file status. This should never happen, please log this as a bug.", logger.ERROR)
return False
# if the file is priority then we're going to replace it even if it exists
else:
self._log(u"This download is marked a priority download so I'm going to replace an existing file if I find one", logger.DEBUG)
# delete the existing file (and company)
for cur_ep in [ep_obj] + ep_obj.relatedEps:
try:
self._delete(cur_ep.location, associated_files=True)
# clean up any left over folders
if cur_ep.location:
helpers.delete_empty_folders(ek.ek(os.path.dirname, cur_ep.location), keep_dir=ep_obj.show._location)
except (OSError, IOError):
raise exceptions.PostProcessingFailed("Unable to delete the existing files")
# if the show directory doesn't exist then make it if allowed
if not ek.ek(os.path.isdir, ep_obj.show._location) and sickbeard.CREATE_MISSING_SHOW_DIRS:
self._log(u"Show directory doesn't exist, creating it", logger.DEBUG)
try:
ek.ek(os.mkdir, ep_obj.show._location)
# do the library update for synoindex
notifiers.synoindex_notifier.addFolder(ep_obj.show._location)
except (OSError, IOError):
raise exceptions.PostProcessingFailed("Unable to create the show directory: " + ep_obj.show._location)
# get metadata for the show (but not episode because it hasn't been fully processed)
ep_obj.show.writeMetadata(True)
# update the ep info before we rename so the quality & release name go into the name properly
for cur_ep in [ep_obj] + ep_obj.relatedEps:
with cur_ep.lock:
cur_release_name = None
# use the best possible representation of the release name
if self.good_results[self.NZB_NAME]:
cur_release_name = self.nzb_name
if cur_release_name.lower().endswith('.nzb'):
cur_release_name = cur_release_name.rpartition('.')[0]
elif self.good_results[self.FOLDER_NAME]:
cur_release_name = self.folder_name
elif self.good_results[self.FILE_NAME]:
cur_release_name = self.file_name
# take the extension off the filename, it's not needed
if '.' in self.file_name:
cur_release_name = self.file_name.rpartition('.')[0]
if cur_release_name:
self._log("Found release name " + cur_release_name, logger.DEBUG)
cur_ep.release_name = cur_release_name
else:
logger.log("good results: " + repr(self.good_results), logger.DEBUG)
cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality)
cur_ep.saveToDB()
# find the destination folder
try:
proper_path = ep_obj.proper_path()
proper_absolute_path = ek.ek(os.path.join, ep_obj.show.location, proper_path)
dest_path = ek.ek(os.path.dirname, proper_absolute_path)
except exceptions.ShowDirNotFoundException:
raise exceptions.PostProcessingFailed(u"Unable to post-process an episode if the show dir doesn't exist, quitting")
self._log(u"Destination folder for this episode: " + dest_path, logger.DEBUG)
# create any folders we need
helpers.make_dirs(dest_path)
# figure out the base name of the resulting episode file
if sickbeard.RENAME_EPISODES:
orig_extension = self.file_name.rpartition('.')[-1]
new_base_name = ek.ek(os.path.basename, proper_path)
new_file_name = new_base_name + '.' + orig_extension
else:
# if we're not renaming then there's no new base name, we'll just use the existing name
new_base_name = None
new_file_name = self.file_name
# with open(self.file_path, 'rb') as fh:
# m = hashlib.md5()
# while True:
# data = fh.read(8192)
# if not data:
# break
# m.update(data)
# MD5 = m.hexdigest()
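        # NOTE: the MD5 below is computed over the file *path* string, not the
        # file contents (the content-based version is commented out above); it
        # is stored in the processed_files table further down.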
m = hashlib.md5()
m.update(self.file_path)
MD5 = m.hexdigest()
try:
path,file=os.path.split(self.file_path)
if sickbeard.TORRENT_DOWNLOAD_DIR in path and sickbeard.TORRENT_DOWNLOAD_DIR != "":
                # possible actions for torrents
if sickbeard.PROCESS_METHOD == "copy":
self._copy(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES)
elif sickbeard.PROCESS_METHOD == "move":
self._move(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES)
elif sickbeard.PROCESS_METHOD == "hardlink":
self._hardlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES)
elif sickbeard.PROCESS_METHOD == "symlink":
self._moveAndSymlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES)
else:
logger.log(u"Unknown process method: " + str(sickbeard.PROCESS_METHOD), logger.ERROR)
raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
else:
                # action for the remaining files
if sickbeard.KEEP_PROCESSED_DIR:
self._copy(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES)
else:
self._move(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES)
except (OSError, IOError):
raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
myDB = db.DBConnection()
## INSERT MD5 of file
controlMD5 = {"episode_id" : int(ep_obj.tvdbid) }
NewValMD5 = {"filename" : new_base_name ,
"md5" : MD5
}
myDB.upsert("processed_files", NewValMD5, controlMD5)
# put the new location in the database
for cur_ep in [ep_obj] + ep_obj.relatedEps:
with cur_ep.lock:
cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
cur_ep.saveToDB()
# log it to history
history.logDownload(ep_obj, self.file_path, new_ep_quality, self.release_group)
# download subtitles
if sickbeard.USE_SUBTITLES and ep_obj.show.subtitles:
cur_ep.downloadSubtitles()
# send notifications
notifiers.notify_download(ep_obj.prettyName())
# generate nfo/tbn
ep_obj.createMetaFiles()
ep_obj.saveToDB()
# do the library update for XBMC
notifiers.xbmc_notifier.update_library(ep_obj.show.name)
# do the library update for Plex
notifiers.plex_notifier.update_library()
# do the library update for NMJ
# nmj_notifier kicks off its library update when the notify_download is issued (inside notifiers)
# do the library update for Synology Indexer
notifiers.synoindex_notifier.addFile(ep_obj.location)
# do the library update for pyTivo
notifiers.pytivo_notifier.update_library(ep_obj)
# do the library update for Trakt
notifiers.trakt_notifier.update_library(ep_obj)
self._run_extra_scripts(ep_obj)
return True
| gpl-3.0 | 1,425,806,893,186,517,200 | 44.899526 | 304 | 0.587498 | false |
alanjw/GreenOpenERP-Win-X86 | python/Lib/test/test_dict.py | 9 | 20894 | import unittest
from test import test_support
import UserDict, random, string
import gc, weakref
class DictTest(unittest.TestCase):
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(dict(), {})
self.assertIsNot(dict(), {})
def test_literal_constructor(self):
# check literal constructor for different sized dicts
# (to exercise the BUILD_MAP oparg).
for n in (0, 1, 6, 256, 400):
items = [(''.join(random.sample(string.letters, 8)), i)
for i in range(n)]
random.shuffle(items)
formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
dictliteral = '{' + ', '.join(formatted_items) + '}'
self.assertEqual(eval(dictliteral), dict(items))
def test_bool(self):
self.assertIs(not {}, True)
self.assertTrue({1: 2})
self.assertIs(bool({}), False)
self.assertIs(bool({1: 2}), True)
def test_keys(self):
d = {}
self.assertEqual(d.keys(), [])
d = {'a': 1, 'b': 2}
k = d.keys()
self.assertTrue(d.has_key('a'))
self.assertTrue(d.has_key('b'))
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = {}
self.assertEqual(d.values(), [])
d = {1:2}
self.assertEqual(d.values(), [2])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = {}
self.assertEqual(d.items(), [])
d = {1:2}
self.assertEqual(d.items(), [(1, 2)])
self.assertRaises(TypeError, d.items, None)
def test_has_key(self):
d = {}
self.assertFalse(d.has_key('a'))
d = {'a': 1, 'b': 2}
k = d.keys()
k.sort()
self.assertEqual(k, ['a', 'b'])
self.assertRaises(TypeError, d.has_key)
def test_contains(self):
d = {}
self.assertNotIn('a', d)
self.assertFalse('a' in d)
self.assertTrue('a' not in d)
d = {'a': 1, 'b': 2}
self.assertIn('a', d)
self.assertIn('b', d)
self.assertNotIn('c', d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = {}
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
d = {'a': 1, 'b': 2}
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = {}
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = {1:1, 2:2, 3:3}
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = {}
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = {}
self.assertIsNot(d.fromkeys('abc'), d)
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
self.assertIsInstance(dictlike().fromkeys('a'), dictlike)
class mydict(dict):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assertIsInstance(ud, UserDict.UserDict)
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
def test_copy(self):
d = {1:1, 2:2, 3:3}
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
d = {}
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
d = {'a': 1, 'b': 2}
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = {}
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key0', [])
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assertFalse(copymode < 0 and ta != tb)
self.assertFalse(a)
self.assertFalse(b)
d = {}
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = {}
k, v = 'abc', 'def'
d[k] = v
self.assertRaises(KeyError, d.pop, 'ghi')
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
# verify longs/ints get same value when key > 32 bits
# (for 64-bit archs). See SF bug #689659.
x = 4503599627370496L
y = 4503599627370496
h = {x: 'anything', y: 'something else'}
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self):
# changing dict size during iteration
d = {}
d[1] = 1
with self.assertRaises(RuntimeError):
for i in d:
d[i+1] = 1
def test_repr(self):
d = {}
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = {}
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = {1: BadRepr()}
self.assertRaises(Exc, repr, d)
def test_le(self):
self.assertFalse({} < {})
self.assertFalse({1: 2} < {1L: 2L})
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 42
d1 = {BadCmp(): 1}
d2 = {1: 1}
with self.assertRaises(Exc):
d1 < d2
def test_missing(self):
# Make sure dict doesn't have a __missing__ method
self.assertFalse(hasattr(dict, "__missing__"))
self.assertFalse(hasattr({}, "__missing__"))
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
class D(dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
with self.assertRaises(RuntimeError) as c:
e[42]
self.assertEqual(c.exception.args, (42,))
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
with self.assertRaises(KeyError) as c:
f[42]
self.assertEqual(c.exception.args, (42,))
class G(dict):
pass
g = G()
with self.assertRaises(KeyError) as c:
g[42]
self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
# SF #1576657
d = {}
with self.assertRaises(KeyError) as c:
d[(1,)]
self.assertEqual(c.exception.args, ((1,),))
def test_bad_key(self):
# Dictionary lookups should fail if __cmp__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __cmp__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = {}
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in ['d[x2] = 2',
'z = d[x2]',
'x2 in d',
'd.has_key(x2)',
'd.get(x2)',
'd.setdefault(x2, 42)',
'd.pop(x2)',
'd.update({x2: 2})']:
with self.assertRaises(CustomException):
exec stmt in locals()
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = {}
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
with self.assertRaises(ZeroDivisionError):
d = {'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
'f': None, 'g': None, 'h': None}
d = {}
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for dictiter objects
class C(object):
pass
iterators = (dict.iteritems, dict.itervalues, dict.iterkeys)
for i in iterators:
obj = C()
ref = weakref.ref(obj)
container = {obj: 1}
obj.x = i(container)
del obj, container
gc.collect()
self.assertIs(ref(), None, "Cycle was not collected")
def _not_tracked(self, t):
# Nested containers can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@test_support.cpython_only
def test_track_literals(self):
# Test GC-optimization of dict literals
x, y, z, w = 1.5, "a", (1, None), []
self._not_tracked({})
self._not_tracked({x:(), y:x, z:1})
self._not_tracked({1: "a", "b": 2})
self._not_tracked({1: 2, (None, True, False, ()): int})
self._not_tracked({1: object()})
# Dicts with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked({1: []})
self._tracked({1: ([],)})
self._tracked({1: {}})
self._tracked({1: set()})
@test_support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically-created dicts
class MyObject(object):
pass
x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
d = dict()
self._not_tracked(d)
d[1] = "a"
self._not_tracked(d)
d[y] = 2
self._not_tracked(d)
d[z] = 3
self._not_tracked(d)
self._not_tracked(d.copy())
d[4] = w
self._tracked(d)
self._tracked(d.copy())
d[4] = None
self._not_tracked(d)
self._not_tracked(d.copy())
# dd isn't tracked right now, but it may mutate and therefore d
# which contains it must be tracked.
d = dict()
dd = dict()
d[1] = dd
self._not_tracked(dd)
self._tracked(d)
dd[1] = d
self._tracked(dd)
d = dict.fromkeys([x, y, z])
self._not_tracked(d)
dd = dict()
dd.update(d)
self._not_tracked(dd)
d = dict.fromkeys([x, y, z, o])
self._tracked(d)
dd = dict()
dd.update(d)
self._tracked(dd)
d = dict(x=x, y=y, z=z)
self._not_tracked(d)
d = dict(x=x, y=y, z=z, w=w)
self._tracked(d)
d = dict()
d.update(x=x, y=y, z=z)
self._not_tracked(d)
d.update(w=w)
self._tracked(d)
d = dict([(x, y), (z, 1)])
self._not_tracked(d)
d = dict([(x, y), (z, w)])
self._tracked(d)
d = dict()
d.update([(x, y), (z, 1)])
self._not_tracked(d)
d.update([(x, y), (z, w)])
self._tracked(d)
@test_support.cpython_only
def test_track_subtypes(self):
# Dict subtypes are always tracked
class MyDict(dict):
pass
self._tracked(MyDict())
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = dict
class Dict(dict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = Dict
def test_main():
with test_support.check_py3k_warnings(
('dict(.has_key..| inequality comparisons) not supported in 3.x',
DeprecationWarning)):
test_support.run_unittest(
DictTest,
GeneralMappingTests,
SubclassMappingTests,
)
if __name__ == "__main__":
test_main()
| agpl-3.0 | -10,874,410,745,564,362 | 28.908284 | 78 | 0.470039 | false |
custode/reviewboard | reviewboard/scmtools/svn/__init__.py | 1 | 22249 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
import re
import weakref
from django.conf import settings
from django.utils import six
from django.utils.translation import ugettext as _
from reviewboard.diffviewer.diffutils import convert_to_unicode
from reviewboard.diffviewer.parser import DiffParser
from reviewboard.scmtools.certs import Certificate
from reviewboard.scmtools.core import (Branch, Commit, SCMTool, HEAD,
PRE_CREATION, UNKNOWN)
from reviewboard.scmtools.errors import (AuthenticationError,
RepositoryNotFoundError,
SCMError,
UnverifiedCertificateError)
from reviewboard.ssh import utils as sshutils
# These will be set later in recompute_svn_backend().
Client = None
has_svn_backend = False
# Register these URI schemes so we can handle them properly.
sshutils.ssh_uri_schemes.append('svn+ssh')
sshutils.register_rbssh('SVN_SSH')
class SVNCertificateFailures:
"""SVN HTTPS certificate failure codes.
These map to the various SVN HTTPS certificate failures in libsvn.
"""
NOT_YET_VALID = 1 << 0
EXPIRED = 1 << 1
CN_MISMATCH = 1 << 2
UNKNOWN_CA = 1 << 3
class SVNTool(SCMTool):
name = "Subversion"
supports_post_commit = True
dependencies = {
'modules': [], # This will get filled in later in
# recompute_svn_backend()
}
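    # Number of commits requested per page; one extra is fetched so that each
    # commit's parent revision can be taken from the following log entry.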
COMMITS_PAGE_LIMIT = 31
def __init__(self, repository):
self.repopath = repository.path
if self.repopath[-1] == '/':
self.repopath = self.repopath[:-1]
super(SVNTool, self).__init__(repository)
if repository.local_site:
local_site_name = repository.local_site.name
else:
local_site_name = None
self.config_dir, self.client = \
self.build_client(self.repopath,
repository.username, repository.password,
local_site_name)
# If we assign a function to the pysvn Client that accesses anything
# bound to SVNClient, it'll end up keeping a reference and a copy of
# the function for every instance that gets created, and will never
# let go. This will cause a rather large memory leak.
#
# The solution is to access a weakref instead. The weakref will
# reference the repository, but it will safely go away when needed.
# The function we pass can access that without causing the leaks
repository_ref = weakref.ref(repository)
self.client.set_ssl_server_trust_prompt(
lambda trust_dict:
SVNTool._ssl_server_trust_prompt(trust_dict, repository_ref()))
# 'svn diff' produces patches which have the revision string localized
# to their system locale. This is a little ridiculous, but we have to
# deal with it because not everyone uses RBTools.
self.revision_re = re.compile("""
^(\(([^\)]+)\)\s)? # creating diffs between two branches of a
# remote repository will insert extra
# "relocation information" into the diff.
(?:\d+-\d+-\d+\ + # svnlook-style diffs contain a timestamp
\d+:\d+:\d+\ + # on each line before the revision number.
            [A-Z]+\ +)?       # This here is probably a really crappy way
                              # to express that, but oh well.
\ *\((?:
[Rr]ev(?:ision)?| # English - svnlook uses 'rev 0' while svn
# diff uses 'revision 0'
revisión:| # Spanish
révision| # French
revisione| # Italian
リビジョン| # Japanese
리비전| # Korean
revisjon| # Norwegian
wersja| # Polish
revisão| # Brazilian Portuguese
版本 # Simplified Chinese
)\ (\d+)\)$
""", re.VERBOSE)
# 'svn diff' also localises the (working copy) string to the system
# locale.
self.working_copy_re = re.compile(r'''
^\((?:
working\ copy| # English
copia\ de\ trabajo| # Spanish
copie\ de\ travail| # French
copia\ locale| # Italian
作業コピー| # Japanese
작업\ 사본| # Korean
            arbeidskopi| # Norwegian
kopia\ robocza| # Polish
cópia\ de\ trabalho| # Brazilian Portuguese
            工作副本 # Simplified Chinese
)\)$
''', re.VERBOSE)
def get_file(self, path, revision=HEAD, **kwargs):
return self.client.get_file(path, revision)
def get_keywords(self, path, revision=HEAD):
return self.client.get_keywords(path, revision)
def get_branches(self):
"""Returns a list of branches.
This assumes the standard layout in the repository.
"""
results = []
try:
root_dirents = self.client.list_dir('/')
except Exception as e:
raise self.normalize_error(e)
default = True
if 'trunk' in root_dirents:
# Looks like the standard layout. Adds trunk and any branches.
trunk = root_dirents['trunk']
results.append(self._create_branch_from_dirent(
'trunk', trunk, default=True))
default = False
if 'branches' in root_dirents:
try:
dirents = self.client.list_dir('branches')
results += [
self._create_branch_from_dirent(name, dirents[name])
for name in sorted(six.iterkeys(dirents))
]
except Exception as e:
raise self.normalize_error(e)
# Add anything else from the root of the repository. This is a
# catch-all for repositories which do not use the standard layout, and
# for those that do, will include any additional top-level directories
# that people may have.
for name in sorted(six.iterkeys(root_dirents)):
if name not in ('trunk', 'branches'):
results.append(self._create_branch_from_dirent(
name, root_dirents[name], default))
default = False
return results
def get_commits(self, branch=None, start=None):
"""Return a list of commits."""
commits = self.client.get_log(branch or '/',
start=start,
limit=self.COMMITS_PAGE_LIMIT,
limit_to_path=False)
results = []
# We fetch one more commit than we care about, because the entries in
# the svn log doesn't include the parent revision.
for i in range(len(commits) - 1):
commit = commits[i]
parent = commits[i + 1]
results.append(Commit(
commit.get('author', ''),
commit['revision'],
commit['date'].isoformat(),
commit.get('message', ''),
parent['revision']))
# If there were fewer than the requested number of commits fetched,
# also include the last one in the list so we don't leave off the
# initial revision.
if len(commits) < self.COMMITS_PAGE_LIMIT:
commit = commits[-1]
results.append(Commit(
commit.get('author', ''),
commit['revision'],
commit['date'].isoformat(),
commit.get('message', '')))
return results
def get_change(self, revision):
"""Get an individual change.
This returns a Commit object containing the details of the commit.
"""
revision = int(revision)
commits = self.client.get_log('/', start=revision, limit=2)
commit = commits[0]
message = commit.get('message', b'').decode('utf-8', 'replace')
author_name = commit.get('author', b'').decode('utf-8', 'replace')
date = commit['date'].isoformat()
if len(commits) > 1:
base_revision = commits[1]['revision']
else:
base_revision = 0
try:
enc, diff = convert_to_unicode(
self.client.diff(base_revision, revision),
self.repository.get_encoding_list())
except Exception as e:
raise self.normalize_error(e)
commit = Commit(author_name, six.text_type(revision), date,
message, six.text_type(base_revision))
commit.diff = diff
return commit
def normalize_patch(self, patch, filename, revision=HEAD):
"""
If using Subversion, we need not only contract keywords in file, but
also in the patch. Otherwise, if a file with expanded keyword somehow
ends up in the repository (e.g. by first checking in a file without
svn:keywords and then setting svn:keywords in the repository), RB
won't be able to apply a patch to such file.
"""
if revision != PRE_CREATION:
keywords = self.get_keywords(filename, revision)
if keywords:
return self.client.collapse_keywords(patch, keywords)
return patch
def parse_diff_revision(self, file_str, revision_str, *args, **kwargs):
# Some diffs have additional tabs between the parts of the file
# revisions
revision_str = revision_str.strip()
if self.working_copy_re.match(revision_str):
return file_str, HEAD
# "(revision )" is generated by a few weird tools (like IntelliJ). If
# in the +++ line of the diff, it means HEAD, and in the --- line, it
# means PRE_CREATION. Since the more important use case is parsing the
# source revision, we treat it as a new file. See bugs 1937 and 2632.
if revision_str == "(revision )":
return file_str, PRE_CREATION
# Binary diffs don't provide revision information, so we set a fake
# "(unknown)" in the SVNDiffParser. This will never actually appear
# in SVN diffs.
if revision_str == "(unknown)":
return file_str, UNKNOWN
m = self.revision_re.match(revision_str)
if not m:
raise SCMError("Unable to parse diff revision header '%s'" %
revision_str)
relocated_file = m.group(2)
revision = m.group(3)
if revision == "0":
revision = PRE_CREATION
if relocated_file:
if not relocated_file.startswith("..."):
raise SCMError("Unable to parse SVN relocated path '%s'" %
relocated_file)
file_str = "%s/%s" % (relocated_file[4:], file_str)
return file_str, revision
def get_repository_info(self):
return self.client.repository_info
def get_fields(self):
return ['basedir', 'diff_path']
def get_parser(self, data):
return SVNDiffParser(data)
def _create_branch_from_dirent(self, name, dirent, default=False):
return Branch(
id=dirent['path'].strip('/'),
name=name,
commit=dirent['created_rev'],
default=default)
@classmethod
def normalize_error(cls, e):
if 'callback_get_login required' in six.text_type(e):
raise AuthenticationError(
msg='Authentication failed when talking to the Subversion '
'repository')
else:
raise SCMError(e)
@classmethod
def _ssl_server_trust_prompt(cls, trust_dict, repository):
"""Callback for SSL cert verification.
This will be called when accessing a repository with an SSL cert.
We will look up a matching cert in the database and see if it's
accepted.
"""
saved_cert = repository.extra_data.get('cert', {})
cert = trust_dict.copy()
del cert['failures']
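        # The callback returns a (trust, accepted_failures, save) tuple; the
        # certificate is trusted only if it matches the one saved in the
        # repository's extra_data.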
return saved_cert == cert, trust_dict['failures'], False
@staticmethod
def on_ssl_failure(e, path, cert_data):
logging.error('SVN: Failed to get repository information '
'for %s: %s' % (path, e))
error = SVNTool.normalize_error(e)
if isinstance(error, AuthenticationError):
raise error
if cert_data:
failures = cert_data['failures']
reasons = []
if failures & SVNCertificateFailures.NOT_YET_VALID:
reasons.append(_('The certificate is not yet valid.'))
if failures & SVNCertificateFailures.EXPIRED:
reasons.append(_('The certificate has expired.'))
if failures & SVNCertificateFailures.CN_MISMATCH:
reasons.append(_('The certificate hostname does not '
'match.'))
if failures & SVNCertificateFailures.UNKNOWN_CA:
reasons.append(_('The certificate is not issued by a '
'trusted authority. Use the fingerprint '
'to validate the certificate manually.'))
raise UnverifiedCertificateError(
Certificate(valid_from=cert_data['valid_from'],
valid_until=cert_data['valid_until'],
hostname=cert_data['hostname'],
realm=cert_data['realm'],
fingerprint=cert_data['finger_print'],
issuer=cert_data['issuer_dname'],
failures=reasons))
raise RepositoryNotFoundError()
@classmethod
def check_repository(cls, path, username=None, password=None,
local_site_name=None):
"""
Performs checks on a repository to test its validity.
This should check if a repository exists and can be connected to.
This will also check if the repository requires an HTTPS certificate.
        Problems are reported by raising an exception. The exception may
        contain extra information, such as a human-readable description of
        the problem.
If the repository is valid and can be connected to, no exception
will be thrown.
"""
super(SVNTool, cls).check_repository(path, username, password,
local_site_name)
if path.startswith('https://'):
client = cls.build_client(path, username, password,
local_site_name=local_site_name)[1]
client.accept_ssl_certificate(path, cls.on_ssl_failure)
@classmethod
def accept_certificate(cls, path, username=None, password=None,
local_site_name=None, certificate=None):
"""Accepts the certificate for the given repository path."""
client = cls.build_client(path, username, password,
local_site_name=local_site_name)[1]
return client.accept_ssl_certificate(path)
@classmethod
def build_client(cls, repopath, username=None, password=None,
local_site_name=None):
if not has_svn_backend:
raise ImportError(_(
'SVN integration requires either subvertpy or pysvn'))
config_dir = os.path.join(os.path.expanduser('~'), '.subversion')
if local_site_name:
# LocalSites can have their own Subversion config, used for
# per-LocalSite SSH keys.
config_dir = cls._prepare_local_site_config_dir(local_site_name)
elif not os.path.exists(config_dir):
cls._create_subversion_dir(config_dir)
client = Client(config_dir, repopath, username, password)
return config_dir, client
@classmethod
def _create_subversion_dir(cls, config_dir):
try:
os.mkdir(config_dir, 0o700)
except OSError:
raise IOError(
_("Unable to create directory %(dirname)s, which is needed "
"for the Subversion configuration. Create this directory "
"and set the web server's user as the the owner.")
% {'dirname': config_dir})
@classmethod
def _prepare_local_site_config_dir(cls, local_site_name):
config_dir = os.path.join(os.path.expanduser('~'), '.subversion')
if not os.path.exists(config_dir):
cls._create_subversion_dir(config_dir)
config_dir = os.path.join(config_dir, local_site_name)
if not os.path.exists(config_dir):
cls._create_subversion_dir(config_dir)
with open(os.path.join(config_dir, 'config'), 'w') as fp:
fp.write('[tunnels]\n')
fp.write('ssh = rbssh --rb-local-site=%s\n' % local_site_name)
return config_dir
class SVNDiffParser(DiffParser):
BINARY_STRING = b"Cannot display: file marked as a binary type."
PROPERTY_PATH_RE = re.compile(r'Property changes on: (.*)')
def parse_diff_header(self, linenum, info):
# We're looking for a SVN property change for SVN < 1.7.
#
# There's going to be at least 5 lines left:
# 1) --- (blah)
# 2) +++ (blah)
# 3) Property changes on: <path>
# 4) -----------------------------------------------------
# 5) Modified: <propname>
if (linenum + 4 < len(self.lines) and
self.lines[linenum].startswith(b'--- (') and
self.lines[linenum + 1].startswith(b'+++ (') and
self.lines[linenum + 2].startswith(b'Property changes on:')):
# Subversion diffs with property changes have no really
# parsable format. The content of a property can easily mimic
# the property change headers. So we can't rely upon it, and
# can't easily display it. Instead, skip it, so it at least
# won't break diffs.
info['skip'] = True
linenum += 4
return linenum
else:
# Handle deleted empty files.
if b'index' in info and info['index'].endswith(b'\t(deleted)'):
info['deleted'] = True
return super(SVNDiffParser, self).parse_diff_header(linenum, info)
def parse_special_header(self, linenum, info):
if (linenum + 1 < len(self.lines) and
self.lines[linenum] == b'Index:'):
# This is an empty Index: line. This might mean we're parsing
# a property change.
return linenum + 2
linenum = super(SVNDiffParser, self).parse_special_header(
linenum, info)
if 'index' in info and linenum != len(self.lines):
if self.lines[linenum] == self.BINARY_STRING:
# Skip this and the svn:mime-type line.
linenum += 2
info['binary'] = True
info['origFile'] = info['index']
info['newFile'] = info['index']
# We can't get the revision info from this diff header.
info['origInfo'] = '(unknown)'
info['newInfo'] = '(working copy)'
return linenum
def parse_after_headers(self, linenum, info):
# We're looking for a SVN property change for SVN 1.7+.
#
# This differs from SVN property changes in older versions of SVN
# in a couple ways:
#
# 1) The ---, +++, and Index: lines have actual filenames.
# Because of this, we won't hit the case in parse_diff_header
# above.
# 2) There's an actual section per-property, so we could parse these
# out in a usable form. We'd still need a way to display that
# sanely, though.
if (self.lines[linenum] == b'' and
linenum + 2 < len(self.lines) and
self.lines[linenum + 1].startswith('Property changes on:')):
# Skip over the next 3 lines (blank, "Property changes on:", and
# the "__________" divider.
info['skip'] = True
linenum += 3
return linenum
def recompute_svn_backend():
"""Recomputes the SVNTool client backend to use.
Normally, this is only called once, but it may be used to reset the
backend for use in testing.
"""
global Client
global has_svn_backend
Client = None
has_svn_backend = False
required_module = None
for backend_path in settings.SVNTOOL_BACKENDS:
try:
mod = __import__(six.binary_type(backend_path),
fromlist=['Client', 'has_svn_backend'])
# Check that this is a valid SVN backend.
if (not hasattr(mod, 'has_svn_backend') or
not hasattr(mod, 'Client')):
logging.error('Attempted to load invalid SVN backend %s',
backend_path)
continue
has_svn_backend = mod.has_svn_backend
# We want either the winning SVN backend or the first one to show
# up in the required module dependencies list.
if has_svn_backend or not required_module:
SVNTool.dependencies['modules'] = [mod.Client.required_module]
if has_svn_backend:
# We found a suitable backend.
logging.info('Using %s backend for SVN', backend_path)
Client = mod.Client
break
except ImportError:
logging.error('Unable to load SVN backend %s',
backend_path, exc_info=1)
recompute_svn_backend()
| mit | -6,548,868,346,954,437,000 | 36.882253 | 79 | 0.554484 | false |
usnistgov/corr | corr-db/corrdb/common/managers/access_manager.py | 1 | 15322 | """Manage access control.
"""
# from .. import logAccess
# from flask.ext.stormpath import StormpathManager
# from flask.ext.stormpath import user
# from flask.ext.stormpath import login_required
# from stormpath.error import Error
import flask as fk
import hashlib
import datetime
import re
def get_or_create(document=None, **kwargs):
return (document(**kwargs).save(), True)
# No admin access for now.
# Only user access is handled.
class AccessManager:
def __init__(self, app):
"""Initializes an access manager instance.
"""
self.config = app.config['ACCOUNT_MANAGEMENT']
self.secur = app.config['SECURITY_MANAGEMENT']['account']
# if self.config['type'] == 'stormpath':
# self.manager = StormpathManager(app)
# self.type = 'stormpath'
# el
if self.config['type'] == 'api-token':
self.manager = None
self.type = 'api-token'
elif self.config['type'] == 'mongodb':
self.manager = None
self.type = 'mongodb'
def create_account(self, email, password, fname, lname, mname):
"""Create an account.
Returns:
Tuple of the account object and a message in case of an error.
"""
try:
_account = self.manager.application.accounts.create({
'email': email,
'password': password,
"username" : email,
"given_name" : fname,
"middle_name" : mname,
"surname" : lname,
})
return (_account, "")
except Error as re:
print('Message: %s' %re.message)
print('HTTP Status: %s' %str(re.status))
print('Developer Message: %s' %re.developer_message)
print('More Information: %s' %re.more_info)
print('Error Code: %s' %str(re.code))
print('Message message: %s' %re.message)
return (None, re.message)
def register(self, email, password, fname, lname, mname):
"""Registration handler.
Returns:
User account registered.
"""
from corrdb.common.models import UserModel
account = None
_account = None
check_password = self.password_check(password)
if not check_password['password_ok']:
message = ["Password rules vialation:"]
if check_password['length_error']:
message.append("Must be at least 8 characters.")
if check_password['digit_error']:
message.append("Must contain at least one digit.")
if check_password['uppercase_error']:
message.append("Must contain at least one upper case character.")
if check_password['lowercase_error']:
message.append("Must contain at least one lower case character.")
if check_password['symbol_error']:
message.append("Must contain at least one special character.")
return False, message
hash_pwd = hashlib.sha256(('CoRRPassword_%s'%password).encode("ascii")).hexdigest()
if self.type == 'api-token':
pass
else:
if self.type == 'mongodb':
account = UserModel.objects(email=email).first()
elif self.type == 'stormpath':
try:
_account = self.manager.application.search(email).first()
except:
_account = None
if _account != None:
account = UserModel.objects(email=email).first()
if account is None:
if self.type == 'stormpath':
account = UserModel.objects(email=email).first()
if account is None:
(account, created) = get_or_create(document=UserModel, created_at=str(datetime.datetime.utcnow()), email=email, group='user', api_token=hashlib.sha256(('CoRRToken_%s_%s'%(email, str(datetime.datetime.utcnow()))).encode("ascii")).hexdigest())
if _account is None:
failure = self.create_account(email, password, fname, lname, mname)[0] is None
if failure:
account.password = hash_pwd
account.save()
if self.type == 'mongodb':
account = UserModel.objects(email=email).first()
if account is None:
(account, created) = get_or_create(document=UserModel, created_at=str(datetime.datetime.utcnow()), email=email, group='user', api_token=hashlib.sha256(('CoRRToken_%s_%s'%(email, str(datetime.datetime.utcnow()))).encode("ascii")).hexdigest())
account.password = hash_pwd
account.save()
account.save()
return True, account
else:
return False, account
return False, account
def login(self, email, password):
"""Account login handler.
Returns:
User account instance if successful otherwise None.
"""
from corrdb.common.models import UserModel
account = None
if self.type == 'stormpath':
try:
_account = self.manager.application.authenticate_account(email, password).account
if _account:
account = UserModel.objects(email=email).first()
else:
_account = self.manager.application.search(email).first()
if _account is None:
failure = self.create_account(email, password, "FirstName", "LastName", "")[0] is None
if failure:
hash_pwd = hashlib.sha256(('CoRRPassword_%s'%password).encode("ascii")).hexdigest()
account = UserModel.objects(email=email, password=hash_pwd).first()
else:
account = UserModel.objects(email=email).first()
else:
account = None
except Error as re:
print('Message: %s' %re.message)
print('HTTP Status: %s' %str(re.status))
print('Developer Message: %s' %re.developer_message)
print('More Information: %s' %re.more_info)
print('Error Code: %s' %str(re.code))
elif self.type == 'api-token':
# No login for api-token.
pass
elif self.type == 'mongodb':
hash_pwd = hashlib.sha256(('CoRRPassword_%s'%password).encode("ascii")).hexdigest()
account_1 = UserModel.objects(email=email).first()
if account_1:
if account_1.password is None:
account_1.password = hash_pwd
account_1.save()
account = account_1
else:
account = UserModel.objects(email=email, password=hash_pwd).first()
else:
# (account, created) = get_or_create(document=UserModel, created_at=str(datetime.datetime.utcnow()), email=email, group='user', api_token=hashlib.sha256(('CoRRToken_%s_%s'%(email, str(datetime.datetime.utcnow()))).encode("ascii")).hexdigest())
# account.password = hash_pwd
# account.save()
account = None
if account and account.group == "unknown":
account.group = "user"
account.save()
if account:
account.connected_at = str(datetime.datetime.utcnow())
if account.auth in ["wrong1", "wrong2", "wrong3"]:
account.auth = "approved"
account.save()
return account
def logout(self, session_token):
"""Session login handler.
"""
if self.type == 'stormpath':
pass
elif self.type == 'api-token':
# Not logout for api-token.
pass
elif self.type == 'mongodb':
pass
def unregister(self, session_token):
"""Account unregistration handler.
Returns:
None in case of a success. Otherwise return the account object.
"""
# No unregister yet.
if self.type == 'stormpath':
pass
elif self.type == 'api-token':
pass
elif self.type == 'mongodb':
pass
return None
def reset_password(self, email):
"""Password recovery handler.
Returns:
User Account in case of a success, otherwise None.
"""
account = None
if self.type == 'stormpath':
try:
account = self.manager.application.send_password_reset_email(email)
except Error as re:
print('Message: %s' %re.message)
print('HTTP Status: %s' %str(re.status))
print('Developer Message: %s' %re.developer_message)
print('More Information: %s' %re.more_info)
print('Error Code: %s' %str(re.code))
print('Message message: %s' %re.message['message'])
elif self.type == 'api-token':
pass
elif self.type == 'mongodb':
pass
return account
def change_password(self, user_model, password):
"""Password change handler.
Returns:
User Account in case of a success, otherwise None.
"""
account = None
check_password = self.password_check(password)
if not check_password['password_ok']:
message = ["Password rules vialation:"]
if check_password['length_error']:
message.append("Must be at least 8 characters.")
if check_password['digit_error']:
message.append("Must contain at least one digit.")
if check_password['uppercase_error']:
message.append("Must contain at least one upper case character.")
if check_password['lowercase_error']:
message.append("Must contain at least one lower case character.")
if check_password['symbol_error']:
message.append("Must contain at least one special character.")
return None, message
hash_pwd = hashlib.sha256(('CoRRPassword_%s'%password).encode("ascii")).hexdigest()
if self.type == 'stormpath':
accounts = self.manager.application.accounts
for acc in accounts:
if acc.email == user_model.email:
account = acc
break
if account:
account.password = password
elif self.type == 'api-token':
pass
elif self.type == 'mongodb':
account = user_model
account.password = hash_pwd
if account:
account.save()
return account, []
def password_check(self, password):
"""Verify the strength of 'password'.
        TODO: use https://cry.github.io/nbp/ to check for commonly used
        passwords (e.g. by porting its nbp.py helper).
A password is considered strong if:
8 characters length or more
1 digit or more
1 symbol or more
1 uppercase letter or more
1 lowercase letter or more
Returns:
a dict indicating the wrong criteria.
"""
# calculating the length
length_error = len(password) < 8
# searching for digits
digit_error = re.search(r"\d", password) is None
# searching for uppercase
uppercase_error = re.search(r"[A-Z]", password) is None
# searching for lowercase
lowercase_error = re.search(r"[a-z]", password) is None
# searching for symbols
# ]\;',./!@#$%^&*()_+-=
symbol_error = not any(i in "]\;',./!@#$%^&*()_+-=]" for i in password)
# overall result
password_ok = not ( length_error or digit_error or uppercase_error or lowercase_error or symbol_error )
return {
'password_ok' : password_ok,
'length_error' : length_error,
'digit_error' : digit_error,
'uppercase_error' : uppercase_error,
'lowercase_error' : lowercase_error,
'symbol_error' : symbol_error,
}
def accounts(self):
"""Retrieve the registered accounts.
Returns:
List of registered users accounts.
"""
from corrdb.common.models import UserModel
users = None
if self.type == 'stormpath':
users = self.manager.application.accounts
elif self.type == 'api-token' or self.type == 'mongodb':
users = UserModel.objects
return users
def check_cloud(self, hash_session, acc_sec=False, cnt_sec=False):
"""Check that a session is valid.
Returns:
Tuple of Validation Boolean and the account instance.
"""
from corrdb.common.models import UserModel
if hash_session == "logout":
account = None
else:
account = UserModel.objects(session=hash_session).first()
if account is None:
return False, None
else:
# We want multiple browser logins without being thrown out.
return True, account
# # print "Connected_at: %s"%str(user_model.connected_at)
# allowance = account.allowed("%s%s"%(fk.request.headers.get('User-Agent'),fk.request.remote_addr))
# print("Allowance: {0}".format(allowance))
# # print "Connected_at: %s"%str(user_model.connected_at)
# if allowance == hash_session:
# if acc_sec and account.extend.get('access', 'verified') != 'verified':
# return False, account
# else:
# return True, account
# else:
# return False, account
def check_api(self, token, acc_sec=False, cnt_sec=False):
        """Get the user object instance from its api token.
        Returns:
            The user object instance.
        """
        from corrdb.common.models import UserModel
print([user.extended() for user in UserModel.objects])
account = UserModel.objects(api_token=token).first()
if account.extend.get('access', 'verified') != 'verified':
return None
else:
return account
def check_app(self, token, acc_sec=False, cnt_sec=False):
        """Get the application object instance from its api token.
        Returns:
            The application object instance.
        """
        from corrdb.common.models import ApplicationModel
if token == "no-app":
return None
else:
for application in ApplicationModel.objects:
print("{0} -- {1}.".format(str(application.developer.id), application.name))
application = ApplicationModel.objects(app_token=token).first()
developer = application.developer
if developer.extend.get('access', 'verified') != 'verified':
return None
else:
return application
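# Hedged usage sketch (editor's illustration, not part of the original API):
# exercise password_check() without a real Flask app or database. The fake
# config below only carries the two keys that __init__ actually reads.
if __name__ == '__main__':
    class _FakeApp(object):
        config = {
            'ACCOUNT_MANAGEMENT': {'type': 'mongodb'},
            'SECURITY_MANAGEMENT': {'account': {}},
        }
    _manager = AccessManager(_FakeApp())
    # Too short and missing digit/uppercase/symbol: several flags are set.
    print(_manager.password_check('weak'))
    # Satisfies all five rules, so 'password_ok' should be True.
    print(_manager.password_check('Str0ng!pass'))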
| mit | -7,367,021,864,759,053,000 | 38.797403 | 265 | 0.544381 | false |
hclivess/Stallion | nuitka/Cryptodome/Hash/MD4.py | 4 | 6598 | # ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
MD4 is specified in RFC1320_ and produces the 128 bit digest of a message.
>>> from Cryptodome.Hash import MD4
>>>
>>> h = MD4.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
MD4 stands for Message Digest version 4; it was invented by Rivest in 1990.
This algorithm is insecure. Do not use it for new designs.
.. _RFC1320: http://tools.ietf.org/html/rfc1320
"""
from Cryptodome.Util.py3compat import bord
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
c_uint8_ptr)
_raw_md4_lib = load_pycryptodome_raw_lib(
"Cryptodome.Hash._MD4",
"""
int md4_init(void **shaState);
int md4_destroy(void *shaState);
int md4_update(void *hs,
const uint8_t *buf,
size_t len);
int md4_digest(const void *shaState,
                                       uint8_t digest[16]);
int md4_copy(const void *src, void *dst);
""")
class MD4Hash(object):
"""Class that implements an MD4 hash
"""
#: The size of the resulting hash in bytes.
digest_size = 16
#: The internal block size of the hash algorithm in bytes.
block_size = 64
#: ASN.1 Object ID
oid = "1.2.840.113549.2.4"
def __init__(self, data=None):
state = VoidPointer()
result = _raw_md4_lib.md4_init(state.address_of())
if result:
raise ValueError("Error %d while instantiating MD4"
% result)
self._state = SmartPointer(state.get(),
_raw_md4_lib.md4_destroy)
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
data : byte string/byte array/memoryview
The next chunk of the message being hashed.
"""
result = _raw_md4_lib.md4_update(self._state.get(),
c_uint8_ptr(data),
c_size_t(len(data)))
if result:
raise ValueError("Error %d while instantiating MD4"
% result)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that
has been hashed so far.
This method does not change the state of the hash object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
bfr = create_string_buffer(self.digest_size)
result = _raw_md4_lib.md4_digest(self._state.get(),
bfr)
if result:
raise ValueError("Error %d while instantiating MD4"
% result)
return get_raw_buffer(bfr)
def hexdigest(self):
"""Return the **printable** digest of the message that has been
hashed so far.
This method does not change the state of the hash object.
:Return: A string of 2* `digest_size` characters. It contains only
hexadecimal ASCII digits.
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:Return: A hash object of the same type
"""
clone = MD4Hash()
result = _raw_md4_lib.md4_copy(self._state.get(),
clone._state.get())
if result:
raise ValueError("Error %d while copying MD4" % result)
return clone
def new(self, data=None):
return MD4Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string/byte array/memoryview
The very first chunk of the message to hash.
It is equivalent to an early call to `MD4Hash.update()`.
Optional.
:Return: A `MD4Hash` object
"""
return MD4Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = MD4Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = MD4Hash.block_size
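# Hedged usage sketch (editor's illustration): clone the intermediate state
# with copy() to hash two messages sharing a common prefix without re-feeding
# the prefix. Requires the compiled _MD4 extension to be importable.
if __name__ == '__main__':
    _prefix = new(b'common prefix ')
    _h1 = _prefix.copy()
    _h1.update(b'message one')
    _h2 = _prefix.copy()
    _h2.update(b'message two')
    print(_h1.hexdigest())
    print(_h2.hexdigest())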
| gpl-3.0 | 7,390,378,160,269,993,000 | 34.664865 | 79 | 0.578054 | false |
groschovskiy/keyczar | cpp/src/tools/swtoolkit/test/include_all_tools_test.py | 18 | 3464 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for including all tools. These are SMALL tests."""
import TestFramework
def TestSConstruct(scons_globals):
"""Test SConstruct file.
Args:
scons_globals: Global variables dict from the SConscript file.
"""
# Get globals from SCons
Environment = scons_globals['Environment']
env = Environment(tools=['component_setup'])
# TODO: SDL and NACL tools generate an error if not installed. Block this
# error.
env['SDL_MODE'] = 'none'
env['NACL_SDK_VALIDATE'] = '0'
# Make sure that all tools can at least be included without failure on all
# platforms.
all_tools = [
'atlmfc_vc80',
'code_coverage',
'code_signing',
'collada_dom',
'command_output',
'component_bits',
'component_builders',
'component_setup',
'component_targets',
'component_targets_msvs',
'component_targets_xml',
'concat_source',
'defer',
'directx_9_0_c',
'directx_9_18_944_0_partial',
'distcc',
'environment_tools',
'gather_inputs',
'naclsdk',
'publish',
'replace_strings',
'replicate',
'sdl',
'seven_zip',
'target_debug',
'target_optimized',
'target_platform_linux',
'target_platform_mac',
'target_platform_windows',
'visual_studio_solution',
'windows_hard_link',
]
for tool in all_tools:
if tool not in env['TOOLS']:
print 'Adding tool %s...' % tool
# Not all tools play well together (for example, you can only use one of
# the target_platform tools at a time), so put each in a separate
# sub-environment
env.Clone(tools=[tool])
def main():
test = TestFramework.TestFramework()
# Run tests
base = 'all_tools/'
test.subdir(base)
test.WriteSConscript(base + 'SConstruct', TestSConstruct)
test.run(chdir=base)
test.pass_test()
if __name__ == '__main__':
main()
| apache-2.0 | -807,901,288,873,416,100 | 30.779817 | 78 | 0.68851 | false |
zhanqxun/cv_fish | win32/Demos/win32gui_dialog.py | 4 | 15197 | # A demo of a fairly complex dialog.
#
# Features:
# * Uses a "dynamic dialog resource" to build the dialog.
# * Uses a ListView control.
# * Dynamically resizes content.
# * Uses a second worker thread to fill the list.
# * Demonstrates support for Windows XP themes.
# If you are on Windows XP, and specify a '--noxp' argument, you will see:
# * alpha-blend issues with icons
# * The buttons are "old" style, rather than based on the XP theme.
# Hence, using:
# import winxpgui as win32gui
# is recommended.
# Please report any problems.
import sys
if "--noxp" in sys.argv:
import win32gui
else:
import winxpgui as win32gui
import win32gui_struct
import win32api
import win32con, winerror
import struct, array
import commctrl
import Queue
import os
IDC_SEARCHTEXT = 1024
IDC_BUTTON_SEARCH = 1025
IDC_BUTTON_DISPLAY = 1026
IDC_LISTBOX = 1027
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
def __init__(self, **kw):
full_fmt = ""
for name, fmt, default, mask in self._struct_items_:
self.__dict__[name] = None
if fmt == "z":
full_fmt += "pi"
else:
full_fmt += fmt
for name, val in kw.iteritems():
if name not in self.__dict__:
raise ValueError("LVITEM structures do not have an item '%s'" % (name,))
self.__dict__[name] = val
def __setattr__(self, attr, val):
if not attr.startswith("_") and attr not in self.__dict__:
raise AttributeError(attr)
self.__dict__[attr] = val
def toparam(self):
self._buffs = []
full_fmt = ""
vals = []
mask = 0
# calc the mask
for name, fmt, default, this_mask in self._struct_items_:
if this_mask is not None and self.__dict__.get(name) is not None:
mask |= this_mask
self.mask = mask
for name, fmt, default, this_mask in self._struct_items_:
val = self.__dict__[name]
if fmt == "z":
fmt = "Pi"
if val is None:
vals.append(0)
vals.append(0)
else:
# Note this demo still works with byte strings. An
# alternate strategy would be to use unicode natively
# and use the 'W' version of the messages - eg,
# LVM_SETITEMW etc.
val = val + "\0"
if isinstance(val, unicode):
val = val.encode("mbcs")
str_buf = array.array("b", val)
vals.append(str_buf.buffer_info()[0])
vals.append(len(val))
self._buffs.append(str_buf) # keep alive during the call.
else:
if val is None:
val = default
vals.append(val)
full_fmt += fmt
return struct.pack(*(full_fmt,) + tuple(vals))
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("iItem", "i", 0, None),
("iSubItem", "i", 0, None),
("state", "I", 0, commctrl.LVIF_STATE),
("stateMask", "I", 0, None),
("text", "z", None, commctrl.LVIF_TEXT),
("iImage", "i", 0, commctrl.LVIF_IMAGE),
("lParam", "i", 0, commctrl.LVIF_PARAM),
("iIdent", "i", 0, None),
]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("fmt", "i", 0, commctrl.LVCF_FMT),
("cx", "i", 0, commctrl.LVCF_WIDTH),
("text", "z", None, commctrl.LVCF_TEXT),
("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
("iImage", "i", 0, commctrl.LVCF_IMAGE),
("iOrder", "i", 0, commctrl.LVCF_ORDER),
]
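# Hedged illustration (editor's note): the masked-struct helpers above compute
# their 'mask' field from whichever optional attributes were supplied, so
# inserting a row into the list control only needs the fields of interest:
#
#   item = LVITEM(text="First column", iItem=0)
#   win32gui.SendMessage(hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
#
# AddListItem() below does exactly this for each column.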
class DemoWindowBase:
def __init__(self):
win32gui.InitCommonControls()
self.hinst = win32gui.dllhandle
self.list_data = {}
def _RegisterWndClass(self):
className = "PythonDocSearch"
message_map = {}
wc = win32gui.WNDCLASS()
wc.SetDialogProc() # Make it a dialog class.
wc.hInstance = self.hinst
wc.lpszClassName = className
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = message_map # could also specify a wndproc.
# C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
## py.ico went away in python 2.5, load from executable instead
this_app=win32api.GetModuleHandle(None)
try:
wc.hIcon=win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
except win32gui.error:
wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
try:
classAtom = win32gui.RegisterClass(wc)
except win32gui.error, err_info:
if err_info.winerror!=winerror.ERROR_CLASS_ALREADY_EXISTS:
raise
return className
def _GetDialogTemplate(self, dlgClassName):
style = win32con.WS_THICKFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
title = "Dynamic Dialog Demo"
# Window frame and title
dlg = [ [title, (0, 0, 210, 250), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
# ID label and text box
dlg.append([130, "Enter something", -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
# Search/Display Buttons
# (x positions don't matter here)
s = cs | win32con.WS_TABSTOP
dlg.append([128, "Fill List", IDC_BUTTON_SEARCH, (5, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Display", IDC_BUTTON_DISPLAY, (100, 35, 50, 14), s])
# List control.
# Can't make this work :(
## s = cs | win32con.WS_TABSTOP
## dlg.append(['SysListView32', "Title", IDC_LISTBOX, (5, 505, 200, 200), s])
return dlg
def _DoCreate(self, fn):
message_map = {
win32con.WM_SIZE: self.OnSize,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_NOTIFY: self.OnNotify,
win32con.WM_INITDIALOG: self.OnInitDialog,
win32con.WM_CLOSE: self.OnClose,
win32con.WM_DESTROY: self.OnDestroy,
WM_SEARCH_RESULT: self.OnSearchResult,
WM_SEARCH_FINISHED: self.OnSearchFinished,
}
dlgClassName = self._RegisterWndClass()
template = self._GetDialogTemplate(dlgClassName)
return fn(self.hinst, template, 0, message_map)
def _SetupList(self):
child_style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | win32con.WS_HSCROLL | win32con.WS_VSCROLL
child_style |= commctrl.LVS_SINGLESEL | commctrl.LVS_SHOWSELALWAYS | commctrl.LVS_REPORT
self.hwndList = win32gui.CreateWindow("SysListView32", None, child_style, 0, 0, 100, 100, self.hwnd, IDC_LISTBOX, self.hinst, None)
child_ex_style = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETEXTENDEDLISTVIEWSTYLE, 0, 0)
child_ex_style |= commctrl.LVS_EX_FULLROWSELECT
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETEXTENDEDLISTVIEWSTYLE, 0, child_ex_style)
# Add an image list - use the builtin shell folder icon - this
# demonstrates the problem with alpha-blending of icons on XP if
# winxpgui is not used in place of win32gui.
il = win32gui.ImageList_Create(
win32api.GetSystemMetrics(win32con.SM_CXSMICON),
win32api.GetSystemMetrics(win32con.SM_CYSMICON),
commctrl.ILC_COLOR32 | commctrl.ILC_MASK,
1, # initial size
0) # cGrow
shell_dll = os.path.join(win32api.GetSystemDirectory(), "shell32.dll")
large, small = win32gui.ExtractIconEx(shell_dll, 4, 1)
win32gui.ImageList_ReplaceIcon(il, -1, small[0])
win32gui.DestroyIcon(small[0])
win32gui.DestroyIcon(large[0])
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETIMAGELIST,
commctrl.LVSIL_SMALL, il)
# Setup the list control columns.
lvc = LVCOLUMN(mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | commctrl.LVCF_SUBITEM)
lvc.fmt = commctrl.LVCFMT_LEFT
lvc.iSubItem = 1
lvc.text = "Title"
lvc.cx = 200
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
lvc.iSubItem = 0
lvc.text = "Order"
lvc.cx = 50
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
win32gui.UpdateWindow(self.hwnd)
def ClearListItems(self):
win32gui.SendMessage(self.hwndList, commctrl.LVM_DELETEALLITEMS)
self.list_data = {}
def AddListItem(self, data, *columns):
num_items = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETITEMCOUNT)
item = LVITEM(text=columns[0], iItem = num_items)
new_index = win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
col_no = 1
for col in columns[1:]:
item = LVITEM(text=col, iItem = new_index, iSubItem = col_no)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETITEM, 0, item.toparam())
col_no += 1
self.list_data[new_index] = data
def OnInitDialog(self, hwnd, msg, wparam, lparam):
self.hwnd = hwnd
# centre the dialog
desktop = win32gui.GetDesktopWindow()
l,t,r,b = win32gui.GetWindowRect(self.hwnd)
dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
self._SetupList()
l,t,r,b = win32gui.GetClientRect(self.hwnd)
self._DoSize(r-l,b-t, 1)
def _DoSize(self, cx, cy, repaint = 1):
# right-justify the textbox.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
# The button.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_DISPLAY)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
list_y = b + 10
w = r - l
win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
# The list control
win32gui.MoveWindow(self.hwndList, 0, list_y, cx, cy-list_y, repaint)
# The last column of the list control.
new_width = cx - win32gui.SendMessage(self.hwndList, commctrl.LVM_GETCOLUMNWIDTH, 0)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETCOLUMNWIDTH, 1, new_width)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self._DoSize(x,y)
return 1
def OnSearchResult(self, hwnd, msg, wparam, lparam):
try:
while 1:
params = self.result_queue.get(0)
self.AddListItem(*params)
except Queue.Empty:
pass
def OnSearchFinished(self, hwnd, msg, wparam, lparam):
print "OnSearchFinished"
def OnNotify(self, hwnd, msg, wparam, lparam):
info = win32gui_struct.UnpackNMITEMACTIVATE(lparam)
if info.code == commctrl.NM_DBLCLK:
print "Double click on item", info.iItem+1
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
id = win32api.LOWORD(wparam)
if id == IDC_BUTTON_SEARCH:
self.ClearListItems()
def fill_slowly(q, hwnd):
import time
for i in range(20):
q.put(("whatever", str(i+1), "Search result " + str(i) ))
win32gui.PostMessage(hwnd, WM_SEARCH_RESULT, 0, 0)
time.sleep(.25)
win32gui.PostMessage(hwnd, WM_SEARCH_FINISHED, 0, 0)
import threading
self.result_queue = Queue.Queue()
thread = threading.Thread(target = fill_slowly, args=(self.result_queue, self.hwnd) )
thread.start()
elif id == IDC_BUTTON_DISPLAY:
print "Display button selected"
sel = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETNEXTITEM, -1, commctrl.LVNI_SELECTED)
print "The selected item is", sel+1
# These function differ based on how the window is used, so may be overridden
def OnClose(self, hwnd, msg, wparam, lparam):
raise NotImplementedError
def OnDestroy(self, hwnd, msg, wparam, lparam):
pass
# An implementation suitable for use with the Win32 Window functions (ie, not
# a true dialog)
class DemoWindow(DemoWindowBase):
def CreateWindow(self):
# Create the window via CreateDialogBoxIndirect - it can then
# work as a "normal" window, once a message loop is established.
self._DoCreate(win32gui.CreateDialogIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.DestroyWindow(hwnd)
# We need to arrange to a WM_QUIT message to be sent to our
# PumpMessages() loop.
def OnDestroy(self, hwnd, msg, wparam, lparam):
win32gui.PostQuitMessage(0) # Terminate the app.
# An implementation suitable for use with the Win32 Dialog functions.
class DemoDialog(DemoWindowBase):
def DoModal(self):
return self._DoCreate(win32gui.DialogBoxIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.EndDialog(hwnd, 0)
def DemoModal():
w=DemoDialog()
w.DoModal()
def DemoCreateWindow():
w=DemoWindow()
w.CreateWindow()
# PumpMessages runs until PostQuitMessage() is called by someone.
win32gui.PumpMessages()
if __name__=='__main__':
DemoModal()
DemoCreateWindow()
| apache-2.0 | -3,355,804,236,979,198,000 | 38.742627 | 172 | 0.583734 | false |
DIRACGrid/DIRAC | src/DIRAC/Core/Utilities/test/Test_Profiler.py | 2 | 3974 | """ Test for Profiler.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from os.path import dirname, join
from subprocess import Popen
import pytest
from flaky import flaky
import DIRAC
from DIRAC.Core.Utilities.Profiler import Profiler
# Mark this entire module as slow
pytestmark = pytest.mark.slow
def test_base():
p = Profiler()
res = p.pid()
assert res['OK'] is False
res = p.status()
assert res['OK'] is False
mainProcess = Popen([
'python',
join(dirname(DIRAC.__file__), 'tests/Utilities/ProcessesCreator_withChildren.py'),
])
time.sleep(1)
p = Profiler(mainProcess.pid)
res = p.pid()
assert res['OK'] is True
res = p.status()
assert res['OK'] is True
res = p.runningTime()
assert res['OK'] is True
assert res['Value'] > 0
res = p.memoryUsage()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.memoryUsage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.vSizeUsage()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.vSizeUsage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.vSizeUsage()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.vSizeUsage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.numThreads()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.numThreads(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.cpuPercentage()
assert res['OK'] is True
assert res['Value'] >= 0
resWC = p.cpuPercentage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] >= 0
assert resWC['Value'] >= res['Value']
@flaky(max_runs=10, min_passes=2)
def test_cpuUsage():
mainProcess = Popen([
'python',
join(dirname(DIRAC.__file__), 'tests/Utilities/ProcessesCreator_withChildren.py'),
])
time.sleep(2)
p = Profiler(mainProcess.pid)
res = p.pid()
assert res['OK'] is True
res = p.status()
assert res['OK'] is True
# user
res = p.cpuUsageUser()
assert res['OK'] is True
assert res['Value'] > 0
resC = p.cpuUsageUser(withChildren=True)
assert resC['OK'] is True
assert resC['Value'] > 0
assert resC['Value'] >= res['Value']
res = p.cpuUsageUser()
assert res['OK'] is True
assert res['Value'] > 0
resC = p.cpuUsageUser(withChildren=True)
assert resC['OK'] is True
assert resC['Value'] > 0
assert resC['Value'] >= res['Value']
resT = p.cpuUsageUser(withTerminatedChildren=True)
assert resT['OK'] is True
assert resT['Value'] > 0
assert resT['Value'] >= res['Value']
resTC = p.cpuUsageUser(withChildren=True, withTerminatedChildren=True)
assert resTC['OK'] is True
assert resTC['Value'] > 0
assert resTC['Value'] >= res['Value']
# system
res = p.cpuUsageSystem()
assert res['OK'] is True
assert res['Value'] >= 0
resWC = p.cpuUsageSystem(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] >= 0
assert resWC['Value'] >= res['Value']
res = p.cpuUsageSystem()
assert res['OK'] is True
assert res['Value'] > 0
resC = p.cpuUsageSystem(withChildren=True)
assert resC['OK'] is True
assert resC['Value'] > 0
assert resC['Value'] >= res['Value']
resT = p.cpuUsageSystem(withTerminatedChildren=True)
assert resT['OK'] is True
assert resT['Value'] > 0
assert resT['Value'] >= res['Value']
resTC = p.cpuUsageSystem(withChildren=True, withTerminatedChildren=True)
assert resTC['OK'] is True
assert resTC['Value'] > 0
assert resTC['Value'] >= res['Value']
# After this the main process will no-longer exist
mainProcess.wait()
res = p.cpuUsageUser()
assert res['OK'] is False
assert res['Errno'] == 3
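# Hedged note (editor's addition): because of the module-level pytestmark,
# these tests are usually selected explicitly, e.g.:
#
#   pytest -v -m slow Test_Profiler.py
#
# test_cpuUsage is additionally wrapped by @flaky, so it is retried up to 10
# times and needs 2 passing runs to succeed.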
| gpl-3.0 | -7,051,127,279,062,369,000 | 24.474359 | 88 | 0.658782 | false |
adaur/SickRage | lib/imdb/parser/http/movieParser.py | 40 | 80839 | """
parser.http.movieParser module (imdb package).
This module provides the classes (and the instances), used to parse the
IMDb pages on the akas.imdb.com server about a movie.
E.g., for Brian De Palma's "The Untouchables", the referred
pages would be:
combined details: http://akas.imdb.com/title/tt0094226/combined
plot summary: http://akas.imdb.com/title/tt0094226/plotsummary
...and so on...
Copyright 2004-2013 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import urllib
from imdb import imdbURL_base
from imdb.Person import Person
from imdb.Movie import Movie
from imdb.Company import Company
from imdb.utils import analyze_title, split_company_name_notes, _Container
from utils import build_person, DOMParserBase, Attribute, Extractor, \
analyze_imdbid
# Dictionary used to convert some section's names.
_SECT_CONV = {
'directed': 'director',
'directed by': 'director',
'directors': 'director',
'editors': 'editor',
'writing credits': 'writer',
'writers': 'writer',
'produced': 'producer',
'cinematography': 'cinematographer',
'film editing': 'editor',
'casting': 'casting director',
'costume design': 'costume designer',
'makeup department': 'make up',
'production management': 'production manager',
'second unit director or assistant director': 'assistant director',
'costume and wardrobe department': 'costume department',
'sound department': 'sound crew',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'also known as': 'akas',
'country': 'countries',
'runtime': 'runtimes',
'language': 'languages',
'certification': 'certificates',
'genre': 'genres',
'created': 'creator',
'creators': 'creator',
'color': 'color info',
'plot': 'plot outline',
'seasons': 'number of seasons',
'art directors': 'art direction',
'assistant directors': 'assistant director',
'set decorators': 'set decoration',
'visual effects department': 'visual effects',
'production managers': 'production manager',
'miscellaneous': 'miscellaneous crew',
'make up department': 'make up',
'plot summary': 'plot outline',
'cinematographers': 'cinematographer',
'camera department': 'camera and electrical department',
'costume designers': 'costume designer',
'production designers': 'production design',
'music original': 'original music',
'casting directors': 'casting director',
'other companies': 'miscellaneous companies',
'producers': 'producer',
'special effects by': 'special effects department',
'special effects': 'special effects companies'
}
def _manageRoles(mo):
"""Perform some transformation on the html, so that roleIDs can
be easily retrieved."""
firstHalf = mo.group(1)
secondHalf = mo.group(2)
newRoles = []
roles = secondHalf.split(' / ')
for role in roles:
role = role.strip()
if not role:
continue
roleID = analyze_imdbid(role)
if roleID is None:
roleID = u'/'
else:
roleID += u'/'
newRoles.append(u'<div class="_imdbpyrole" roleid="%s">%s</div>' % \
(roleID, role.strip()))
return firstHalf + u' / '.join(newRoles) + mo.group(3)
_reRolesMovie = re.compile(r'(<td class="char">)(.*?)(</td>)',
re.I | re.M | re.S)
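# Hedged illustration of the rewrite performed by _manageRoles (markup
# simplified, character ID made up):
#   <td class="char"><a href="/character/ch0000001/">Eliot Ness</a></td>
# roughly becomes
#   <td class="char"><div class="_imdbpyrole" roleid="0000001/"><a
#       href="/character/ch0000001/">Eliot Ness</a></div></td>
# so that the cast extractor below can read the roleID attribute directly.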
def _replaceBR(mo):
"""Replaces <br> tags with '::' (useful for some akas)"""
txt = mo.group(0)
return txt.replace('<br>', '::')
_reAkas = re.compile(r'<h5>also known as:</h5>.*?</div>', re.I | re.M | re.S)
def makeSplitter(lstrip=None, sep='|', comments=True,
origNotesSep=' (', newNotesSep='::(', strip=None):
"""Return a splitter function suitable for a given set of data."""
def splitter(x):
if not x: return x
x = x.strip()
if not x: return x
if lstrip is not None:
x = x.lstrip(lstrip).lstrip()
lx = x.split(sep)
lx[:] = filter(None, [j.strip() for j in lx])
if comments:
lx[:] = [j.replace(origNotesSep, newNotesSep, 1) for j in lx]
if strip:
lx[:] = [j.strip(strip) for j in lx]
return lx
return splitter
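# Hedged illustration (made-up inputs): a splitter built with
# makeSplitter(lstrip='|') turns u'| USA | Italy' into [u'USA', u'Italy'],
# while the default notes handling rewrites u'Dolby (as Dolby SR)' into
# u'Dolby::(as Dolby SR)'.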
def _toInt(val, replace=()):
"""Return the value, converted to integer, or None; if present, 'replace'
must be a list of tuples of values to replace."""
for before, after in replace:
val = val.replace(before, after)
try:
return int(val)
except (TypeError, ValueError):
return None
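# E.g. (illustrative): _toInt('1,234 votes', [(',', ''), (' votes', '')])
# returns 1234, while a value that still fails int() conversion yields None.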
class DOMHTMLMovieParser(DOMParserBase):
"""Parser for the "combined details" (and if instance.mdparse is
True also for the "main details") page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
mparser = DOMHTMLMovieParser()
result = mparser.parse(combined_details_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='title',
path="//h1",
attrs=Attribute(key='title',
path=".//text()",
postprocess=analyze_title)),
Extractor(label='glossarysections',
group="//a[@class='glossary']",
group_key="./@name",
group_key_normalize=lambda x: x.replace('_', ' '),
path="../../../..//tr",
attrs=Attribute(key=None,
multi=True,
path={'person': ".//text()",
'link': "./td[1]/a[@href]/@href"},
postprocess=lambda x: \
build_person(x.get('person') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='cast',
path="//table[@class='cast']//tr",
attrs=Attribute(key="cast",
multi=True,
path={'person': ".//text()",
'link': "td[2]/a/@href",
'roleID': \
"td[4]/div[@class='_imdbpyrole']/@roleid"},
postprocess=lambda x: \
build_person(x.get('person') or u'',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or u'').split('/'))
)),
Extractor(label='genres',
path="//div[@class='info']//a[starts-with(@href," \
" '/Sections/Genres')]",
attrs=Attribute(key="genres",
multi=True,
path="./text()")),
Extractor(label='h5sections',
path="//div[@class='info']/h5/..",
attrs=[
Attribute(key="plot summary",
path="./h5[starts-with(text(), " \
"'Plot:')]/../div/text()",
postprocess=lambda x: \
x.strip().rstrip('|').rstrip()),
Attribute(key="aspect ratio",
path="./h5[starts-with(text()," \
" 'Aspect')]/../div/text()",
postprocess=lambda x: x.strip()),
Attribute(key="mpaa",
path="./h5/a[starts-with(text()," \
" 'MPAA')]/../../div/text()",
postprocess=lambda x: x.strip()),
Attribute(key="countries",
path="./h5[starts-with(text(), " \
"'Countr')]/../div[@class='info-content']//text()",
postprocess=makeSplitter('|')),
Attribute(key="language",
path="./h5[starts-with(text(), " \
"'Language')]/..//text()",
postprocess=makeSplitter('Language:')),
Attribute(key='color info',
path="./h5[starts-with(text(), " \
"'Color')]/..//text()",
postprocess=makeSplitter('Color:')),
Attribute(key='sound mix',
path="./h5[starts-with(text(), " \
"'Sound Mix')]/..//text()",
postprocess=makeSplitter('Sound Mix:')),
# Collects akas not encosed in <i> tags.
Attribute(key='other akas',
path="./h5[starts-with(text(), " \
"'Also Known As')]/../div//text()",
postprocess=makeSplitter(sep='::',
origNotesSep='" - ',
newNotesSep='::',
strip='"')),
Attribute(key='runtimes',
path="./h5[starts-with(text(), " \
"'Runtime')]/../div/text()",
postprocess=makeSplitter()),
Attribute(key='certificates',
path="./h5[starts-with(text(), " \
"'Certificat')]/..//text()",
postprocess=makeSplitter('Certification:')),
Attribute(key='number of seasons',
path="./h5[starts-with(text(), " \
"'Seasons')]/..//text()",
postprocess=lambda x: x.count('|') + 1),
Attribute(key='original air date',
path="./h5[starts-with(text(), " \
"'Original Air Date')]/../div/text()"),
Attribute(key='tv series link',
path="./h5[starts-with(text(), " \
"'TV Series')]/..//a/@href"),
Attribute(key='tv series title',
path="./h5[starts-with(text(), " \
"'TV Series')]/..//a/text()")
]),
Extractor(label='language codes',
path="//h5[starts-with(text(), 'Language')]/..//a[starts-with(@href, '/language/')]",
attrs=Attribute(key='language codes', multi=True,
path="./@href",
postprocess=lambda x: x.split('/')[2].strip()
)),
Extractor(label='country codes',
path="//h5[starts-with(text(), 'Country')]/..//a[starts-with(@href, '/country/')]",
attrs=Attribute(key='country codes', multi=True,
path="./@href",
postprocess=lambda x: x.split('/')[2].strip()
)),
Extractor(label='creator',
path="//h5[starts-with(text(), 'Creator')]/..//a",
attrs=Attribute(key='creator', multi=True,
path={'name': "./text()",
'link': "./@href"},
postprocess=lambda x: \
build_person(x.get('name') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='thin writer',
path="//h5[starts-with(text(), 'Writer')]/..//a",
attrs=Attribute(key='thin writer', multi=True,
path={'name': "./text()",
'link': "./@href"},
postprocess=lambda x: \
build_person(x.get('name') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='thin director',
path="//h5[starts-with(text(), 'Director')]/..//a",
attrs=Attribute(key='thin director', multi=True,
path={'name': "./text()",
'link': "@href"},
postprocess=lambda x: \
build_person(x.get('name') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='top 250/bottom 100',
path="//div[@class='starbar-special']/" \
"a[starts-with(@href, '/chart/')]",
attrs=Attribute(key='top/bottom rank',
path="./text()")),
Extractor(label='series years',
path="//div[@id='tn15title']//span" \
"[starts-with(text(), 'TV series')]",
attrs=Attribute(key='series years',
path="./text()",
postprocess=lambda x: \
x.replace('TV series','').strip())),
Extractor(label='number of episodes',
path="//a[@title='Full Episode List']",
attrs=Attribute(key='number of episodes',
path="./text()",
postprocess=lambda x: \
_toInt(x, [(' Episodes', '')]))),
Extractor(label='akas',
path="//i[@class='transl']",
attrs=Attribute(key='akas', multi=True, path='text()',
postprocess=lambda x:
                        x.replace('  ', ' ').rstrip('-').replace('" - ',
                            '"::', 1).strip('"').replace('  ', ' '))),
Extractor(label='production notes/status',
path="//h5[starts-with(text(), 'Status:')]/..//div[@class='info-content']",
attrs=Attribute(key='production status',
path=".//text()",
postprocess=lambda x: x.strip().split('|')[0].strip().lower())),
Extractor(label='production notes/status updated',
path="//h5[starts-with(text(), 'Status Updated:')]/..//div[@class='info-content']",
attrs=Attribute(key='production status updated',
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='production notes/comments',
path="//h5[starts-with(text(), 'Comments:')]/..//div[@class='info-content']",
attrs=Attribute(key='production comments',
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='production notes/note',
path="//h5[starts-with(text(), 'Note:')]/..//div[@class='info-content']",
attrs=Attribute(key='production note',
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='blackcatheader',
group="//b[@class='blackcatheader']",
group_key="./text()",
group_key_normalize=lambda x: x.lower(),
path="../ul/li",
attrs=Attribute(key=None,
multi=True,
path={'name': "./a//text()",
'comp-link': "./a/@href",
'notes': "./text()"},
postprocess=lambda x: \
Company(name=x.get('name') or u'',
companyID=analyze_imdbid(x.get('comp-link')),
notes=(x.get('notes') or u'').strip())
)),
Extractor(label='rating',
path="//div[@class='starbar-meta']/b",
attrs=Attribute(key='rating',
path=".//text()")),
Extractor(label='votes',
path="//div[@class='starbar-meta']/a[@href]",
attrs=Attribute(key='votes',
path=".//text()")),
Extractor(label='cover url',
path="//a[@name='poster']",
attrs=Attribute(key='cover url',
path="./img/@src"))
]
preprocessors = [
(re.compile(r'(<b class="blackcatheader">.+?</b>)', re.I),
r'</div><div>\1'),
('<small>Full cast and crew for<br>', ''),
('<td> </td>', '<td>...</td>'),
('<span class="tv-extra">TV mini-series</span>',
'<span class="tv-extra">(mini)</span>'),
(_reRolesMovie, _manageRoles),
(_reAkas, _replaceBR)]
def preprocess_dom(self, dom):
# Handle series information.
xpath = self.xpath(dom, "//b[text()='Series Crew']")
if xpath:
b = xpath[-1] # In doubt, take the last one.
for a in self.xpath(b, "./following::h5/a[@class='glossary']"):
name = a.get('name')
if name:
a.set('name', 'series %s' % name)
# Remove links to IMDbPro.
for proLink in self.xpath(dom, "//span[@class='pro-link']"):
proLink.drop_tree()
# Remove some 'more' links (keep others, like the one around
# the number of votes).
for tn15more in self.xpath(dom,
"//a[@class='tn15more'][starts-with(@href, '/title/')]"):
tn15more.drop_tree()
return dom
re_space = re.compile(r'\s+')
re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
def postprocess_data(self, data):
# Convert section names.
for sect in data.keys():
if sect in _SECT_CONV:
data[_SECT_CONV[sect]] = data[sect]
del data[sect]
sect = _SECT_CONV[sect]
# Filter out fake values.
for key in data:
value = data[key]
if isinstance(value, list) and value:
if isinstance(value[0], Person):
data[key] = filter(lambda x: x.personID is not None, value)
if isinstance(value[0], _Container):
for obj in data[key]:
obj.accessSystem = self._as
obj.modFunct = self._modFunct
if 'akas' in data or 'other akas' in data:
akas = data.get('akas') or []
other_akas = data.get('other akas') or []
akas += other_akas
nakas = []
for aka in akas:
aka = aka.strip()
if aka.endswith('" -'):
aka = aka[:-3].rstrip()
nakas.append(aka)
if 'akas' in data:
del data['akas']
if 'other akas' in data:
del data['other akas']
if nakas:
data['akas'] = nakas
if 'runtimes' in data:
data['runtimes'] = [x.replace(' min', u'')
for x in data['runtimes']]
if 'original air date' in data:
oid = self.re_space.sub(' ', data['original air date']).strip()
data['original air date'] = oid
aid = self.re_airdate.findall(oid)
if aid and len(aid[0]) == 3:
date, season, episode = aid[0]
date = date.strip()
try: season = int(season)
except: pass
try: episode = int(episode)
except: pass
if date and date != '????':
data['original air date'] = date
else:
del data['original air date']
# Handle also "episode 0".
if season or type(season) is type(0):
data['season'] = season
if episode or type(season) is type(0):
data['episode'] = episode
for k in ('writer', 'director'):
t_k = 'thin %s' % k
if t_k not in data:
continue
if k not in data:
data[k] = data[t_k]
del data[t_k]
if 'top/bottom rank' in data:
tbVal = data['top/bottom rank'].lower()
if tbVal.startswith('top'):
tbKey = 'top 250 rank'
tbVal = _toInt(tbVal, [('top 250: #', '')])
else:
tbKey = 'bottom 100 rank'
tbVal = _toInt(tbVal, [('bottom 100: #', '')])
if tbVal:
data[tbKey] = tbVal
del data['top/bottom rank']
if 'year' in data and data['year'] == '????':
del data['year']
if 'tv series link' in data:
if 'tv series title' in data:
data['episode of'] = Movie(title=data['tv series title'],
movieID=analyze_imdbid(
data['tv series link']),
accessSystem=self._as,
modFunct=self._modFunct)
del data['tv series title']
del data['tv series link']
if 'rating' in data:
try:
data['rating'] = float(data['rating'].replace('/10', ''))
except (TypeError, ValueError):
pass
if 'votes' in data:
try:
votes = data['votes'].replace(',', '').replace('votes', '')
data['votes'] = int(votes)
except (TypeError, ValueError):
pass
return data
def _process_plotsummary(x):
"""Process a plot (contributed by Rdian06)."""
xauthor = x.get('author')
xplot = x.get('plot', u'').strip()
if xauthor:
xplot += u'::%s' % xauthor
return xplot
class DOMHTMLPlotParser(DOMParserBase):
"""Parser for the "plot summary" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a 'plot' key, containing a list
of string with the structure: 'summary::summary_author <author@email>'.
Example:
        pparser = DOMHTMLPlotParser()
result = pparser.parse(plot_summary_html_string)
"""
_defGetRefs = True
# Notice that recently IMDb started to put the email of the
# author only in the link, that we're not collecting, here.
extractors = [Extractor(label='plot',
path="//ul[@class='zebraList']//p",
attrs=Attribute(key='plot',
multi=True,
path={'plot': './text()[1]',
'author': './span/em/a/text()'},
postprocess=_process_plotsummary))]
def _process_award(x):
award = {}
_award = x.get('award')
if _award is not None:
_award = _award.strip()
award['award'] = _award
if not award['award']:
return {}
award['year'] = x.get('year').strip()
if award['year'] and award['year'].isdigit():
award['year'] = int(award['year'])
award['result'] = x.get('result').strip()
category = x.get('category').strip()
if category:
award['category'] = category
received_with = x.get('with')
if received_with is not None:
award['with'] = received_with.strip()
notes = x.get('notes')
if notes is not None:
notes = notes.strip()
if notes:
award['notes'] = notes
award['anchor'] = x.get('anchor')
return award
class DOMHTMLAwardsParser(DOMParserBase):
"""Parser for the "awards" page of a given person or movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        awparser = DOMHTMLAwardsParser()
result = awparser.parse(awards_html_string)
"""
subject = 'title'
_containsObjects = True
extractors = [
Extractor(label='awards',
group="//table//big",
group_key="./a",
path="./ancestor::tr[1]/following-sibling::tr/" \
"td[last()][not(@colspan)]",
attrs=Attribute(key=None,
multi=True,
path={
'year': "../td[1]/a/text()",
'result': "../td[2]/b/text()",
'award': "../td[3]/text()",
'category': "./text()[1]",
# FIXME: takes only the first co-recipient
'with': "./small[starts-with(text()," \
" 'Shared with:')]/following-sibling::a[1]/text()",
'notes': "./small[last()]//text()",
'anchor': ".//text()"
},
postprocess=_process_award
)),
Extractor(label='recipients',
group="//table//big",
group_key="./a",
path="./ancestor::tr[1]/following-sibling::tr/" \
"td[last()]/small[1]/preceding-sibling::a",
attrs=Attribute(key=None,
multi=True,
path={
'name': "./text()",
'link': "./@href",
'anchor': "..//text()"
}
))
]
preprocessors = [
(re.compile('(<tr><td[^>]*>.*?</td></tr>\n\n</table>)', re.I),
r'\1</table>'),
(re.compile('(<tr><td[^>]*>\n\n<big>.*?</big></td></tr>)', re.I),
r'</table><table class="_imdbpy">\1'),
(re.compile('(<table[^>]*>\n\n)</table>(<table)', re.I), r'\1\2'),
(re.compile('(<small>.*?)<br>(.*?</small)', re.I), r'\1 \2'),
(re.compile('(</tr>\n\n)(<td)', re.I), r'\1<tr>\2')
]
def preprocess_dom(self, dom):
"""Repeat td elements according to their rowspan attributes
in subsequent tr elements.
"""
cols = self.xpath(dom, "//td[@rowspan]")
for col in cols:
span = int(col.get('rowspan'))
del col.attrib['rowspan']
position = len(self.xpath(col, "./preceding-sibling::td"))
row = col.getparent()
for tr in self.xpath(row, "./following-sibling::tr")[:span-1]:
# if not cloned, child will be moved to new parent
clone = self.clone(col)
# XXX: beware that here we don't use an "adapted" function,
# because both BeautifulSoup and lxml uses the same
# "insert" method.
tr.insert(position, clone)
return dom
def postprocess_data(self, data):
if len(data) == 0:
return {}
nd = []
for key in data.keys():
dom = self.get_dom(key)
assigner = self.xpath(dom, "//a/text()")[0]
for entry in data[key]:
if not entry.has_key('name'):
if not entry:
continue
# this is an award, not a recipient
entry['assigner'] = assigner.strip()
# find the recipients
matches = [p for p in data[key]
if p.has_key('name') and (entry['anchor'] ==
p['anchor'])]
if self.subject == 'title':
recipients = [Person(name=recipient['name'],
personID=analyze_imdbid(recipient['link']))
for recipient in matches]
entry['to'] = recipients
elif self.subject == 'name':
recipients = [Movie(title=recipient['name'],
movieID=analyze_imdbid(recipient['link']))
for recipient in matches]
entry['for'] = recipients
nd.append(entry)
del entry['anchor']
return {'awards': nd}
class DOMHTMLTaglinesParser(DOMParserBase):
"""Parser for the "taglines" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = DOMHTMLTaglinesParser()
result = tparser.parse(taglines_html_string)
"""
extractors = [Extractor(label='taglines',
path='//*[contains(concat(" ", normalize-space(@class), " "), " soda ")]',
attrs=Attribute(key='taglines',
multi=True,
path="./text()"))]
def postprocess_data(self, data):
if 'taglines' in data:
data['taglines'] = [tagline.strip() for tagline in data['taglines']]
return data
class DOMHTMLKeywordsParser(DOMParserBase):
"""Parser for the "keywords" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
kwparser = DOMHTMLKeywordsParser()
result = kwparser.parse(keywords_html_string)
"""
extractors = [Extractor(label='keywords',
path="//a[starts-with(@href, '/keyword/')]",
attrs=Attribute(key='keywords',
path="./text()", multi=True,
postprocess=lambda x: \
x.lower().replace(' ', '-')))]
class DOMHTMLAlternateVersionsParser(DOMParserBase):
"""Parser for the "alternate versions" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
avparser = HTMLAlternateVersionsParser()
result = avparser.parse(alternateversions_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='alternate versions',
path="//ul[@class='trivia']/li",
attrs=Attribute(key='alternate versions',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip()))]
class DOMHTMLTriviaParser(DOMParserBase):
"""Parser for the "trivia" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        tparser = DOMHTMLTriviaParser()
        result = tparser.parse(trivia_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='alternate versions',
path="//div[@class='sodatext']",
attrs=Attribute(key='trivia',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip()))]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
for qLink in self.xpath(dom, "//span[@class='linksoda']"):
qLink.drop_tree()
return dom
class DOMHTMLSoundtrackParser(DOMHTMLAlternateVersionsParser):
kind = 'soundtrack'
preprocessors = [
('<br>', '\n')
]
def postprocess_data(self, data):
if 'alternate versions' in data:
nd = []
for x in data['alternate versions']:
ds = x.split('\n')
title = ds[0]
if title[0] == '"' and title[-1] == '"':
title = title[1:-1]
nds = []
newData = {}
for l in ds[1:]:
if ' with ' in l or ' by ' in l or ' from ' in l \
or ' of ' in l or l.startswith('From '):
nds.append(l)
else:
if nds:
nds[-1] += l
else:
nds.append(l)
newData[title] = {}
for l in nds:
skip = False
for sep in ('From ',):
if l.startswith(sep):
fdix = len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
skip = True
if not skip:
for sep in ' with ', ' by ', ' from ', ' of ':
fdix = l.find(sep)
if fdix != -1:
fdix = fdix+len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
break
nd.append(newData)
data['soundtrack'] = nd
return data
class DOMHTMLCrazyCreditsParser(DOMParserBase):
"""Parser for the "crazy credits" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
ccparser = DOMHTMLCrazyCreditsParser()
result = ccparser.parse(crazycredits_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='crazy credits', path="//ul/li/tt",
attrs=Attribute(key='crazy credits', multi=True,
path=".//text()",
postprocess=lambda x: \
x.replace('\n', ' ').replace(' ', ' ')))]
def _process_goof(x):
if x['spoiler_category']:
return x['spoiler_category'].strip() + ': SPOILER: ' + x['text'].strip()
else:
return x['category'].strip() + ': ' + x['text'].strip()
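# Example of the post-processing above (comment added for clarity; the values are
# hypothetical): a dictionary such as {'category': 'Continuity', 'spoiler_category':
# None, 'text': 'The glass is suddenly full again.'} is rendered as the string
# 'Continuity: The glass is suddenly full again.'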
class DOMHTMLGoofsParser(DOMParserBase):
"""Parser for the "goofs" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
gparser = DOMHTMLGoofsParser()
result = gparser.parse(goofs_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='goofs', path="//div[@class='soda odd']",
attrs=Attribute(key='goofs', multi=True,
path={
'text':"./text()",
'category':'./preceding-sibling::h4[1]/text()',
'spoiler_category': './h4/text()'
},
postprocess=_process_goof))]
class DOMHTMLQuotesParser(DOMParserBase):
"""Parser for the "memorable quotes" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
qparser = DOMHTMLQuotesParser()
result = qparser.parse(quotes_html_string)
"""
_defGetRefs = True
extractors = [
Extractor(label='quotes_odd',
path="//div[@class='quote soda odd']",
attrs=Attribute(key='quotes_odd',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip().replace(' \n',
'::').replace('::\n', '::').replace('\n', ' '))),
Extractor(label='quotes_even',
path="//div[@class='quote soda even']",
attrs=Attribute(key='quotes_even',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip().replace(' \n',
'::').replace('::\n', '::').replace('\n', ' ')))
]
preprocessors = [
(re.compile('<a href="#" class="hidesoda hidden">Hide options</a><br>', re.I), '')
]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
for qLink in self.xpath(dom, "//span[@class='linksoda']"):
qLink.drop_tree()
for qLink in self.xpath(dom, "//div[@class='sharesoda_pre']"):
qLink.drop_tree()
return dom
def postprocess_data(self, data):
quotes = data.get('quotes_odd', []) + data.get('quotes_even', [])
if not quotes:
return {}
quotes = [q.split('::') for q in quotes]
return {'quotes': quotes}
class DOMHTMLReleaseinfoParser(DOMParserBase):
"""Parser for the "release dates" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rdparser = DOMHTMLReleaseinfoParser()
result = rdparser.parse(releaseinfo_html_string)
"""
extractors = [Extractor(label='release dates',
path="//table[@id='release_dates']//tr",
attrs=Attribute(key='release dates', multi=True,
path={'country': ".//td[1]//text()",
'date': ".//td[2]//text()",
'notes': ".//td[3]//text()"})),
Extractor(label='akas',
path="//table[@id='akas']//tr",
attrs=Attribute(key='akas', multi=True,
path={'title': "./td[1]/text()",
'countries': "./td[2]/text()"}))]
preprocessors = [
(re.compile('(<h5><a name="?akas"?.*</table>)', re.I | re.M | re.S),
r'<div class="_imdbpy_akas">\1</div>')]
def postprocess_data(self, data):
if not ('release dates' in data or 'akas' in data): return data
releases = data.get('release dates') or []
rl = []
for i in releases:
country = i.get('country')
date = i.get('date')
if not (country and date): continue
country = country.strip()
date = date.strip()
if not (country and date): continue
notes = i['notes']
info = u'%s::%s' % (country, date)
if notes:
info += notes
rl.append(info)
if releases:
del data['release dates']
if rl:
data['release dates'] = rl
akas = data.get('akas') or []
nakas = []
for aka in akas:
title = (aka.get('title') or '').strip()
if not title:
continue
countries = (aka.get('countries') or '').split(',')
if not countries:
nakas.append(title)
else:
for country in countries:
nakas.append('%s::%s' % (title, country.strip()))
if akas:
del data['akas']
if nakas:
data['akas from release info'] = nakas
return data
class DOMHTMLRatingsParser(DOMParserBase):
"""Parser for the "user ratings" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rparser = DOMHTMLRatingsParser()
result = rparser.parse(userratings_html_string)
"""
re_means = re.compile('mean\s*=\s*([0-9]\.[0-9])\.\s*median\s*=\s*([0-9])',
re.I)
extractors = [
Extractor(label='number of votes',
path="//td[b='Percentage']/../../tr",
attrs=[Attribute(key='votes',
multi=True,
path={
'votes': "td[1]//text()",
'ordinal': "td[3]//text()"
})]),
Extractor(label='mean and median',
path="//p[starts-with(text(), 'Arithmetic mean')]",
attrs=Attribute(key='mean and median',
path="text()")),
Extractor(label='rating',
path="//a[starts-with(@href, '/search/title?user_rating=')]",
attrs=Attribute(key='rating',
path="text()")),
Extractor(label='demographic voters',
path="//td[b='Average']/../../tr",
attrs=Attribute(key='demographic voters',
multi=True,
path={
'voters': "td[1]//text()",
'votes': "td[2]//text()",
'average': "td[3]//text()"
})),
Extractor(label='top 250',
path="//a[text()='top 250']",
attrs=Attribute(key='top 250',
path="./preceding-sibling::text()[1]"))
]
def postprocess_data(self, data):
nd = {}
votes = data.get('votes', [])
if votes:
nd['number of votes'] = {}
for i in xrange(1, 11):
_ordinal = int(votes[i]['ordinal'])
_strvts = votes[i]['votes'] or '0'
nd['number of votes'][_ordinal] = \
int(_strvts.replace(',', ''))
mean = data.get('mean and median', '')
if mean:
means = self.re_means.findall(mean)
if means and len(means[0]) == 2:
am, med = means[0]
try: am = float(am)
except (ValueError, OverflowError): pass
if type(am) is type(1.0):
nd['arithmetic mean'] = am
try: med = int(med)
except (ValueError, OverflowError): pass
if type(med) is type(0):
nd['median'] = med
if 'rating' in data:
nd['rating'] = float(data['rating'])
dem_voters = data.get('demographic voters')
if dem_voters:
nd['demographic'] = {}
for i in xrange(1, len(dem_voters)):
if (dem_voters[i]['votes'] is not None) \
and (dem_voters[i]['votes'].strip()):
nd['demographic'][dem_voters[i]['voters'].strip().lower()] \
= (int(dem_voters[i]['votes'].replace(',', '')),
float(dem_voters[i]['average']))
if 'imdb users' in nd.get('demographic', {}):
nd['votes'] = nd['demographic']['imdb users'][0]
nd['demographic']['all votes'] = nd['demographic']['imdb users']
del nd['demographic']['imdb users']
top250 = data.get('top 250')
if top250:
sd = top250[9:]
i = sd.find(' ')
if i != -1:
sd = sd[:i]
try: sd = int(sd)
except (ValueError, OverflowError): pass
if type(sd) is type(0):
nd['top 250 rank'] = sd
return nd
class DOMHTMLEpisodesRatings(DOMParserBase):
"""Parser for the "episode ratings ... by date" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
erparser = DOMHTMLEpisodesRatings()
result = erparser.parse(eprating_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='title', path="//title",
attrs=Attribute(key='title', path="./text()")),
Extractor(label='ep ratings',
path="//th/../..//tr",
attrs=Attribute(key='episodes', multi=True,
path={'nr': ".//td[1]/text()",
'ep title': ".//td[2]//text()",
'movieID': ".//td[2]/a/@href",
'rating': ".//td[3]/text()",
'votes': ".//td[4]/text()"}))]
def postprocess_data(self, data):
if 'title' not in data or 'episodes' not in data: return {}
nd = []
title = data['title']
for i in data['episodes']:
ept = i['ep title']
movieID = analyze_imdbid(i['movieID'])
votes = i['votes']
rating = i['rating']
if not (ept and movieID and votes and rating): continue
try:
votes = int(votes.replace(',', '').replace('.', ''))
except:
pass
try:
rating = float(rating)
except:
pass
ept = ept.strip()
ept = u'%s {%s' % (title, ept)
nr = i['nr']
if nr:
ept += u' (#%s)' % nr.strip()
ept += '}'
if movieID is not None:
movieID = str(movieID)
m = Movie(title=ept, movieID=movieID, accessSystem=self._as,
modFunct=self._modFunct)
epofdict = m.get('episode of')
if epofdict is not None:
m['episode of'] = Movie(data=epofdict, accessSystem=self._as,
modFunct=self._modFunct)
nd.append({'episode': m, 'votes': votes, 'rating': rating})
return {'episodes rating': nd}
def _normalize_href(href):
if (href is not None) and (not href.lower().startswith('http://')):
if href.startswith('/'): href = href[1:]
# TODO: imdbURL_base may be set by the user!
href = '%s%s' % (imdbURL_base, href)
return href
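# Illustrative note (added; not part of the original module): assuming the default
# imdbURL_base of 'http://akas.imdb.com/', _normalize_href turns the relative links
# found in the parsed pages into absolute URLs and leaves absolute ones untouched:
#   _normalize_href('/title/tt0133093/')  -> 'http://akas.imdb.com/title/tt0133093/'
#   _normalize_href('http://example.com') -> 'http://example.com'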
class DOMHTMLCriticReviewsParser(DOMParserBase):
"""Parser for the "critic reviews" pages of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        crparser = DOMHTMLCriticReviewsParser()
        result = crparser.parse(criticreviews_html_string)
"""
kind = 'critic reviews'
extractors = [
Extractor(label='metascore',
path="//div[@class='metascore_wrap']/div/span",
attrs=Attribute(key='metascore',
path=".//text()")),
Extractor(label='metacritic url',
path="//div[@class='article']/div[@class='see-more']/a",
attrs=Attribute(key='metacritic url',
path="./@href")) ]
class DOMHTMLOfficialsitesParser(DOMParserBase):
"""Parser for the "official sites", "external reviews", "newsgroup
reviews", "miscellaneous links", "sound clips", "video clips" and
"photographs" pages of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
osparser = DOMHTMLOfficialsitesParser()
result = osparser.parse(officialsites_html_string)
"""
kind = 'official sites'
extractors = [
Extractor(label='site',
path="//ol/li/a",
attrs=Attribute(key='self.kind',
multi=True,
path={
'link': "./@href",
'info': "./text()"
},
postprocess=lambda x: (x.get('info').strip(),
urllib.unquote(_normalize_href(x.get('link'))))))
]
class DOMHTMLConnectionParser(DOMParserBase):
"""Parser for the "connections" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
connparser = DOMHTMLConnectionParser()
result = connparser.parse(connections_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='connection',
group="//div[@class='_imdbpy']",
group_key="./h5/text()",
group_key_normalize=lambda x: x.lower(),
path="./a",
attrs=Attribute(key=None,
path={'title': "./text()",
'movieID': "./@href"},
multi=True))]
preprocessors = [
('<h5>', '</div><div class="_imdbpy"><h5>'),
# To get the movie's year.
('</a> (', ' ('),
('\n<br/>', '</a>'),
('<br/> - ', '::')
]
def postprocess_data(self, data):
for key in data.keys():
nl = []
for v in data[key]:
title = v['title']
ts = title.split('::', 1)
title = ts[0].strip()
notes = u''
if len(ts) == 2:
notes = ts[1].strip()
m = Movie(title=title,
movieID=analyze_imdbid(v['movieID']),
accessSystem=self._as, notes=notes,
modFunct=self._modFunct)
nl.append(m)
data[key] = nl
if not data: return {}
return {'connections': data}
class DOMHTMLLocationsParser(DOMParserBase):
"""Parser for the "locations" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
lparser = DOMHTMLLocationsParser()
result = lparser.parse(locations_html_string)
"""
extractors = [Extractor(label='locations', path="//dt",
attrs=Attribute(key='locations', multi=True,
path={'place': ".//text()",
'note': "./following-sibling::dd[1]" \
"//text()"},
postprocess=lambda x: (u'%s::%s' % (
x['place'].strip(),
(x['note'] or u'').strip())).strip(':')))]
class DOMHTMLTechParser(DOMParserBase):
    """Parser for the "technical", "business", "literature",
    "publicity" (for people) and "contacts" (for people) pages of
    a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = HTMLTechParser()
result = tparser.parse(technical_html_string)
"""
kind = 'tech'
extractors = [Extractor(label='tech',
group="//h5",
group_key="./text()",
group_key_normalize=lambda x: x.lower(),
path="./following-sibling::div[1]",
attrs=Attribute(key=None,
path=".//text()",
postprocess=lambda x: [t.strip()
for t in x.split('\n') if t.strip()]))]
preprocessors = [
(re.compile('(<h5>.*?</h5>)', re.I), r'</div>\1<div class="_imdbpy">'),
(re.compile('((<br/>|</p>|</table>))\n?<br/>(?!<a)', re.I),
r'\1</div>'),
# the ones below are for the publicity parser
(re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
(re.compile('(</td><td valign="top">)', re.I), r'\1::'),
(re.compile('(</tr><tr>)', re.I), r'\n\1'),
# this is for splitting individual entries
(re.compile('<br/>', re.I), r'\n'),
]
def postprocess_data(self, data):
for key in data:
data[key] = filter(None, data[key])
if self.kind in ('literature', 'business', 'contacts') and data:
if 'screenplay/teleplay' in data:
data['screenplay-teleplay'] = data['screenplay/teleplay']
del data['screenplay/teleplay']
data = {self.kind: data}
else:
if self.kind == 'publicity':
if 'biography (print)' in data:
data['biography-print'] = data['biography (print)']
del data['biography (print)']
# Tech info.
for key in data.keys():
if key.startswith('film negative format'):
data['film negative format'] = data[key]
del data[key]
elif key.startswith('film length'):
data['film length'] = data[key]
del data[key]
return data
class DOMHTMLRecParser(DOMParserBase):
"""Parser for the "recommendations" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rparser = HTMLRecParser()
result = rparser.parse(recommendations_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='recommendations',
path="//td[@valign='middle'][1]",
attrs=Attribute(key='../../tr/td[1]//text()',
multi=True,
path={'title': ".//text()",
'movieID': ".//a/@href"}))]
def postprocess_data(self, data):
for key in data.keys():
n_key = key
n_keyl = n_key.lower()
if n_keyl == 'suggested by the database':
n_key = 'database'
elif n_keyl == 'imdb users recommend':
n_key = 'users'
data[n_key] = [Movie(title=x['title'],
movieID=analyze_imdbid(x['movieID']),
accessSystem=self._as, modFunct=self._modFunct)
for x in data[key]]
del data[key]
if data: return {'recommendations': data}
return data
class DOMHTMLNewsParser(DOMParserBase):
"""Parser for the "news" page of a given movie or person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
nwparser = DOMHTMLNewsParser()
result = nwparser.parse(news_html_string)
"""
_defGetRefs = True
extractors = [
Extractor(label='news',
path="//h2",
attrs=Attribute(key='news',
multi=True,
path={
'title': "./text()",
'fromdate': "../following-sibling::p[1]/small//text()",
# FIXME: sometimes (see The Matrix (1999)) <p> is found
# inside news text.
'body': "../following-sibling::p[2]//text()",
'link': "../..//a[text()='Permalink']/@href",
'fulllink': "../..//a[starts-with(text(), " \
"'See full article at')]/@href"
},
postprocess=lambda x: {
'title': x.get('title').strip(),
'date': x.get('fromdate').split('|')[0].strip(),
'from': x.get('fromdate').split('|')[1].replace('From ',
'').strip(),
'body': (x.get('body') or u'').strip(),
'link': _normalize_href(x.get('link')),
'full article link': _normalize_href(x.get('fulllink'))
}))
]
preprocessors = [
(re.compile('(<a name=[^>]+><h2>)', re.I), r'<div class="_imdbpy">\1'),
(re.compile('(<hr/>)', re.I), r'</div>\1'),
(re.compile('<p></p>', re.I), r'')
]
def postprocess_data(self, data):
if not data.has_key('news'):
return {}
for news in data['news']:
if news.has_key('full article link'):
if news['full article link'] is None:
del news['full article link']
return data
def _parse_review(x):
result = {}
title = x.get('title').strip()
if title[-1] == ':': title = title[:-1]
result['title'] = title
result['link'] = _normalize_href(x.get('link'))
kind = x.get('kind').strip()
if kind[-1] == ':': kind = kind[:-1]
result['review kind'] = kind
text = x.get('review').replace('\n\n', '||').replace('\n', ' ').split('||')
review = '\n'.join(text)
if x.get('author') is not None:
author = x.get('author').strip()
review = review.split(author)[0].strip()
result['review author'] = author[2:]
if x.get('item') is not None:
item = x.get('item').strip()
review = review[len(item):].strip()
review = "%s: %s" % (item, review)
result['review'] = review
return result
class DOMHTMLSeasonEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = DOMHTMLSeasonEpisodesParser()
result = sparser.parse(episodes_html_string)
"""
extractors = [
Extractor(label='series link',
path="//div[@class='parent']",
attrs=[Attribute(key='series link',
path=".//a/@href")]
),
Extractor(label='series title',
path="//head/meta[@property='og:title']",
attrs=[Attribute(key='series title',
path="./@content")]
),
Extractor(label='seasons list',
path="//select[@id='bySeason']//option",
attrs=[Attribute(key='_seasons',
multi=True,
path="./@value")]),
Extractor(label='selected season',
path="//select[@id='bySeason']//option[@selected]",
attrs=[Attribute(key='_current_season',
path='./@value')]),
Extractor(label='episodes',
path=".",
group="//div[@class='info']",
group_key=".//meta/@content",
group_key_normalize=lambda x: 'episode %s' % x,
attrs=[Attribute(key=None,
multi=True,
path={
"link": ".//strong//a[@href][1]/@href",
"original air date": ".//div[@class='airdate']/text()",
"title": ".//strong//text()",
"plot": ".//div[@class='item_description']//text()"
}
)]
)
]
def postprocess_data(self, data):
series_id = analyze_imdbid(data.get('series link'))
series_title = data.get('series title', '').strip()
selected_season = data.get('_current_season',
'unknown season').strip()
if not (series_id and series_title):
return {}
series = Movie(title=series_title, movieID=str(series_id),
accessSystem=self._as, modFunct=self._modFunct)
if series.get('kind') == 'movie':
series['kind'] = u'tv series'
try: selected_season = int(selected_season)
except: pass
nd = {selected_season: {}}
if 'episode -1' in data:
counter = 1
for episode in data['episode -1']:
while 'episode %d' % counter in data:
counter += 1
k = 'episode %d' % counter
data[k] = [episode]
del data['episode -1']
for episode_nr, episode in data.iteritems():
if not (episode and episode[0] and
episode_nr.startswith('episode ')):
continue
episode = episode[0]
episode_nr = episode_nr[8:].rstrip()
try: episode_nr = int(episode_nr)
except: pass
            episode_id = analyze_imdbid(episode.get('link', ''))
episode_air_date = episode.get('original air date',
'').strip()
episode_title = episode.get('title', '').strip()
episode_plot = episode.get('plot', '')
if not (episode_nr is not None and episode_id and episode_title):
continue
ep_obj = Movie(movieID=episode_id, title=episode_title,
accessSystem=self._as, modFunct=self._modFunct)
ep_obj['kind'] = u'episode'
ep_obj['episode of'] = series
ep_obj['season'] = selected_season
ep_obj['episode'] = episode_nr
if episode_air_date:
ep_obj['original air date'] = episode_air_date
if episode_air_date[-4:].isdigit():
ep_obj['year'] = episode_air_date[-4:]
if episode_plot:
ep_obj['plot'] = episode_plot
nd[selected_season][episode_nr] = ep_obj
_seasons = data.get('_seasons') or []
for idx, season in enumerate(_seasons):
try: _seasons[idx] = int(season)
except: pass
return {'episodes': nd, '_seasons': _seasons,
'_current_season': selected_season}
def _build_episode(x):
"""Create a Movie object for a given series' episode."""
episode_id = analyze_imdbid(x.get('link'))
episode_title = x.get('title')
e = Movie(movieID=episode_id, title=episode_title)
e['kind'] = u'episode'
oad = x.get('oad')
if oad:
e['original air date'] = oad.strip()
year = x.get('year')
if year is not None:
year = year[5:]
if year == 'unknown': year = u'????'
if year and year.isdigit():
year = int(year)
e['year'] = year
else:
if oad and oad[-4:].isdigit():
e['year'] = int(oad[-4:])
epinfo = x.get('episode')
if epinfo is not None:
season, episode = epinfo.split(':')[0].split(',')
e['season'] = int(season[7:])
e['episode'] = int(episode[8:])
else:
e['season'] = 'unknown'
e['episode'] = 'unknown'
plot = x.get('plot')
if plot:
e['plot'] = plot.strip()
return e
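# Illustrative sketch (added for clarity; the values are made up): given an extracted
# dictionary like
#   {'link': '/title/tt0606035/', 'title': 'The One Where Monica Gets a Roommate',
#    'year': 'year-1994', 'episode': 'Season 1, Episode 1:'}
# _build_episode returns a Movie whose 'kind' is u'episode', with 'season' == 1,
# 'episode' == 1 and 'year' == 1994 (parsed from the text after the 'year-' prefix).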
class DOMHTMLEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
eparser = DOMHTMLEpisodesParser()
result = eparser.parse(episodes_html_string)
"""
# XXX: no more used for the list of episodes parser,
# but only for the episodes cast parser (see below).
_containsObjects = True
kind = 'episodes list'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::span/strong[1]/text()"
def _init(self):
self.extractors = [
Extractor(label='series',
path="//html",
attrs=[Attribute(key='series title',
path=".//title/text()"),
Attribute(key='series movieID',
path=".//h1/a[@class='main']/@href",
postprocess=analyze_imdbid)
]),
Extractor(label='episodes',
group="//div[@class='_imdbpy']/h3",
group_key="./a/@name",
path=self._episodes_path,
attrs=Attribute(key=None,
multi=True,
path={
'link': "./a/@href",
'title': "./a/text()",
'year': "./preceding-sibling::a[1]/@name",
'episode': "./text()[1]",
'oad': self._oad_path,
'plot': "./following-sibling::text()[1]"
},
postprocess=_build_episode))]
if self.kind == 'episodes cast':
self.extractors += [
Extractor(label='cast',
group="//h4",
group_key="./text()[1]",
group_key_normalize=lambda x: x.strip(),
path="./following-sibling::table[1]//td[@class='nm']",
attrs=Attribute(key=None,
multi=True,
path={'person': "..//text()",
'link': "./a/@href",
'roleID': \
"../td[4]/div[@class='_imdbpyrole']/@roleid"},
postprocess=lambda x: \
build_person(x.get('person') or u'',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or u'').split('/'),
accessSystem=self._as,
modFunct=self._modFunct)))
]
preprocessors = [
(re.compile('(<hr/>\n)(<h3>)', re.I),
r'</div>\1<div class="_imdbpy">\2'),
(re.compile('(</p>\n\n)</div>', re.I), r'\1'),
(re.compile('<h3>(.*?)</h3>', re.I), r'<h4>\1</h4>'),
(_reRolesMovie, _manageRoles),
(re.compile('(<br/> <br/>\n)(<hr/>)', re.I), r'\1</div>\2')
]
def postprocess_data(self, data):
# A bit extreme?
if not 'series title' in data: return {}
if not 'series movieID' in data: return {}
stitle = data['series title'].replace('- Episode list', '')
stitle = stitle.replace('- Episodes list', '')
stitle = stitle.replace('- Episode cast', '')
stitle = stitle.replace('- Episodes cast', '')
stitle = stitle.strip()
if not stitle: return {}
seriesID = data['series movieID']
if seriesID is None: return {}
series = Movie(title=stitle, movieID=str(seriesID),
accessSystem=self._as, modFunct=self._modFunct)
nd = {}
for key in data.keys():
if key.startswith('filter-season-') or key.startswith('season-'):
season_key = key.replace('filter-season-', '').replace('season-', '')
try: season_key = int(season_key)
except: pass
nd[season_key] = {}
ep_counter = 1
for episode in data[key]:
if not episode: continue
episode_key = episode.get('episode')
if episode_key is None: continue
if not isinstance(episode_key, int):
episode_key = ep_counter
ep_counter += 1
cast_key = 'Season %s, Episode %s:' % (season_key,
episode_key)
if data.has_key(cast_key):
cast = data[cast_key]
for i in xrange(len(cast)):
cast[i].billingPos = i + 1
episode['cast'] = cast
episode['episode of'] = series
nd[season_key][episode_key] = episode
if len(nd) == 0:
return {}
return {'episodes': nd}
class DOMHTMLEpisodesCastParser(DOMHTMLEpisodesParser):
"""Parser for the "episodes cast" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        ecparser = DOMHTMLEpisodesCastParser()
        result = ecparser.parse(episodes_html_string)
"""
kind = 'episodes cast'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::b[1]/text()"
class DOMHTMLFaqsParser(DOMParserBase):
"""Parser for the "FAQ" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
fparser = DOMHTMLFaqsParser()
result = fparser.parse(faqs_html_string)
"""
_defGetRefs = True
# XXX: bsoup and lxml don't match (looks like a minor issue, anyway).
extractors = [
Extractor(label='faqs',
path="//div[@class='section']",
attrs=Attribute(key='faqs',
multi=True,
path={
'question': "./h3/a/span/text()",
'answer': "../following-sibling::div[1]//text()"
},
postprocess=lambda x: u'%s::%s' % (x.get('question').strip(),
'\n\n'.join(x.get('answer').replace(
'\n\n', '\n').strip().split('||')))))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||'),
(re.compile('<h4>(.*?)</h4>\n', re.I), r'||\1--'),
(re.compile('<span class="spoiler"><span>(.*?)</span></span>', re.I),
r'[spoiler]\1[/spoiler]')
]
class DOMHTMLAiringParser(DOMParserBase):
"""Parser for the "airing" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
aparser = DOMHTMLAiringParser()
result = aparser.parse(airing_html_string)
"""
_containsObjects = True
extractors = [
Extractor(label='series title',
path="//title",
attrs=Attribute(key='series title', path="./text()",
postprocess=lambda x: \
x.replace(' - TV schedule', u''))),
Extractor(label='series id',
path="//h1/a[@href]",
attrs=Attribute(key='series id', path="./@href")),
Extractor(label='tv airings',
path="//tr[@class]",
attrs=Attribute(key='airing',
multi=True,
path={
'date': "./td[1]//text()",
'time': "./td[2]//text()",
'channel': "./td[3]//text()",
'link': "./td[4]/a[1]/@href",
'title': "./td[4]//text()",
'season': "./td[5]//text()",
},
postprocess=lambda x: {
'date': x.get('date'),
'time': x.get('time'),
'channel': x.get('channel').strip(),
'link': x.get('link'),
'title': x.get('title'),
'season': (x.get('season') or '').strip()
}
))
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
seriesTitle = data['series title']
seriesID = analyze_imdbid(data['series id'])
if data.has_key('airing'):
for airing in data['airing']:
title = airing.get('title', '').strip()
if not title:
epsTitle = seriesTitle
if seriesID is None:
continue
epsID = seriesID
else:
epsTitle = '%s {%s}' % (data['series title'],
airing['title'])
epsID = analyze_imdbid(airing['link'])
e = Movie(title=epsTitle, movieID=epsID)
airing['episode'] = e
del airing['link']
del airing['title']
if not airing['season']:
del airing['season']
if 'series title' in data:
del data['series title']
if 'series id' in data:
del data['series id']
if 'airing' in data:
data['airing'] = filter(None, data['airing'])
if 'airing' not in data or not data['airing']:
return {}
return data
class DOMHTMLSynopsisParser(DOMParserBase):
"""Parser for the "synopsis" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = HTMLSynopsisParser()
result = sparser.parse(synopsis_html_string)
"""
extractors = [
Extractor(label='synopsis',
path="//div[@class='display'][not(@style)]",
attrs=Attribute(key='synopsis',
path=".//text()",
postprocess=lambda x: '\n\n'.join(x.strip().split('||'))))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
class DOMHTMLParentsGuideParser(DOMParserBase):
"""Parser for the "parents guide" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
pgparser = HTMLParentsGuideParser()
result = pgparser.parse(parentsguide_html_string)
"""
extractors = [
Extractor(label='parents guide',
group="//div[@class='section']",
group_key="./h3/a/span/text()",
group_key_normalize=lambda x: x.lower(),
path="../following-sibling::div[1]/p",
attrs=Attribute(key=None,
path=".//text()",
postprocess=lambda x: [t.strip().replace('\n', ' ')
for t in x.split('||') if t.strip()]))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
def postprocess_data(self, data):
data2 = {}
for key in data:
if data[key]:
data2[key] = data[key]
if not data2:
return {}
return {'parents guide': data2}
_OBJECTS = {
'movie_parser': ((DOMHTMLMovieParser,), None),
'plot_parser': ((DOMHTMLPlotParser,), None),
'movie_awards_parser': ((DOMHTMLAwardsParser,), None),
'taglines_parser': ((DOMHTMLTaglinesParser,), None),
'keywords_parser': ((DOMHTMLKeywordsParser,), None),
'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None),
'goofs_parser': ((DOMHTMLGoofsParser,), None),
'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None),
'trivia_parser': ((DOMHTMLTriviaParser,), None),
'soundtrack_parser': ((DOMHTMLSoundtrackParser,), {'kind': 'soundtrack'}),
'quotes_parser': ((DOMHTMLQuotesParser,), None),
'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None),
'ratings_parser': ((DOMHTMLRatingsParser,), None),
'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'criticrev_parser': ((DOMHTMLCriticReviewsParser,),
{'kind': 'critic reviews'}),
'externalrev_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'external reviews'}),
'newsgrouprev_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'newsgroup reviews'}),
'misclinks_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'misc links'}),
'soundclips_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'sound clips'}),
'videoclips_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'video clips'}),
'photosites_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'photo sites'}),
'connections_parser': ((DOMHTMLConnectionParser,), None),
'tech_parser': ((DOMHTMLTechParser,), None),
'business_parser': ((DOMHTMLTechParser,),
{'kind': 'business', '_defGetRefs': 1}),
'literature_parser': ((DOMHTMLTechParser,), {'kind': 'literature'}),
'locations_parser': ((DOMHTMLLocationsParser,), None),
'rec_parser': ((DOMHTMLRecParser,), None),
'news_parser': ((DOMHTMLNewsParser,), None),
'episodes_parser': ((DOMHTMLEpisodesParser,), None),
'season_episodes_parser': ((DOMHTMLSeasonEpisodesParser,), None),
'episodes_cast_parser': ((DOMHTMLEpisodesCastParser,), None),
'eprating_parser': ((DOMHTMLEpisodesRatings,), None),
'movie_faqs_parser': ((DOMHTMLFaqsParser,), None),
'airing_parser': ((DOMHTMLAiringParser,), None),
'synopsis_parser': ((DOMHTMLSynopsisParser,), None),
'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None)
}
| gpl-3.0 | -912,132,293,669,755,400 | 40.583848 | 113 | 0.465444 | false |
jucimarjr/IPC_2017-1 | lista06/lista06_lista02_questao25.py | 1 | 1529 | #---------------------------------------------------------------------------
# Introduction to Computer Programming (Introdução a Programação de Computadores) - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# FANG YAO 1115180236
# LUIZ PAULO MACHADO E SOUZA 1515200542
# FELIPE GUERREIRO DE MELLO 1315120052
# YURI LEANDRO DE AQUINO SILVA 1615100462
#
#25) Write an algorithm (originally posed in PORTUGOL pseudocode) that:
#a) Reads a set of integer values corresponding to the 80 grades of the students
#in a class, grades which range from 0 to 10;
#b) Computes the absolute frequency and the relative frequency of each grade;
#c) Prints a table containing the grade values (from 0 to 10) and their
#respective absolute and relative frequencies.
#Notes:
#1. The absolute frequency of a grade is the number of times it appears in the
#data set;
#2. The relative frequency is the absolute frequency divided by the total number
#of data values;
#3. Use compound (array) variables only where strictly necessary.
grades = []
for i in range(0, 80):
    n = float(input())
    # keep asking until a valid grade between 0 and 10 is entered
    while n < 0 or n > 10:
        print("invalid grade")
        n = float(input())
    grades.append(n)
# absolute frequency: number of times each read grade appears in the data set
absolute_freq = []
for i in range(0, 80):
    absolute_freq.append(grades.count(grades[i]))
# relative frequency: absolute frequency divided by the total number of readings (80)
relative_freq = []
for i in range(0, 80):
    relative_freq.append(absolute_freq[i] / 80)
for i in range(0, 80):
    print("Grade:", grades[i], "Absolute frequency:", absolute_freq[i], "Relative frequency:", relative_freq[i])
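# Illustrative addition (not part of the original submission): item (c) asks for one
# table row per grade value from 0 to 10; collections.Counter yields those absolute
# frequencies directly, and dividing by the 80 readings gives the relative frequency.
from collections import Counter

frequency = Counter(grades)
print("Grade | Absolute frequency | Relative frequency")
for grade in range(0, 11):
    absolute = frequency[grade]
    print(grade, absolute, absolute / 80)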
| apache-2.0 | -2,593,557,404,899,977,700 | 37.552632 | 95 | 0.627412 | false |
umlfri/umlfri2 | umlfri2/metamodel/projecttemplate/project.py | 1 | 1171 | class ProjectTemplate:
def __init__(self, id, elements, connections, diagrams):
self.__id = id
self.__elements = elements
self.__connections = connections
self.__diagrams = diagrams
self.__metamodel = None
def _set_metamodel(self, metamodel):
self.__metamodel = metamodel
@property
def id(self):
return self.__id
@property
def icon(self):
return self.__metamodel.addon.icon
@property
def addon(self):
return self.__metamodel.addon
@property
def metamodel(self):
return self.__metamodel
@property
def elements(self):
yield from self.__elements
@property
def connections(self):
yield from self.__connections
@property
def diagrams(self):
yield from self.__diagrams
def compile(self):
for element in self.__elements:
element._compile(self.__metamodel)
for connection in self.__connections:
connection._compile(self.__metamodel)
for diagram in self.__diagrams:
diagram._compile(self.__metamodel)
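# Usage sketch (added as an illustration; the variable names below are assumptions,
# not taken from the add-on code): the loader is expected to attach a metamodel
# before the template is compiled, e.g.
#   template = ProjectTemplate('empty', elements=[], connections=[], diagrams=[])
#   template._set_metamodel(metamodel)
#   template.compile()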
| gpl-3.0 | 2,747,092,450,215,675,000 | 23.395833 | 60 | 0.578992 | false |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 13 | 6775 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support (deprecated)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
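# For example (illustrative comment, added): _pprint({'alpha': 0.5, 'fit': True})
# returns "alpha=0.5, fit=True" (key order follows the dict's iteration order).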
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
USE OF THIS EXCEPTION IS DEPRECATED.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
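# Quick illustration (added; not in the original module) of the naive metrics above,
# using plain NumPy arrays:
#   _accuracy_score(np.array([1, 0, 1, 1]), np.array([1, 1, 1, 1]))   -> 0.75
#   _mean_squared_error(np.array([1.0, 2.0]), np.array([1.0, 4.0]))   -> 2.0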
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
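# Usage sketch (added for illustration; not part of the original file): the fallback
# mirrors sklearn's train_test_split ordering for plain NumPy arrays, with a default
# train_size of 0.75:
#   X = np.arange(200).reshape(100, 2)
#   y = np.arange(100)
#   X_train, X_test, y_train, y_test = _train_test_split(X, y, random_state=0)
#   # X_train.shape == (75, 2), X_test.shape == (25, 2)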
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.model_selection import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 | -7,228,258,770,837,343,000 | 30.658879 | 91 | 0.656384 | false |
sfstpala/Victory-Chat | modules/chat.py | 1 | 12486 |
import time
import json
import cherrypy
import markdown
import urllib.parse
import functools
import collections
import hashlib
def jsonify(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
cherrypy.response.headers["Content-Type"] = "application/json"
return json.dumps(function(*args, **kwargs)).encode()
return wrapper
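# Illustrative note (added): jsonify lets an exposed CherryPy handler return a plain
# Python structure; the decorator sets the JSON content type and sends the UTF-8
# encoded JSON bytes, e.g. a handler returning {'ok': True} is delivered to the
# client as b'{"ok": true}'.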
class ChatMixIn (object):
def create_new_room(self, room):
self.db.request("POST", "/%s_rooms/" %
(self.couchdb_prefix), body=room)
self.db.request("PUT", "/%s_room_%s_messages" % (
self.couchdb_prefix, room["num_id"]))
self.db.request("PUT", "/%s_room_%s_messages/_design/messages/" % (
self.couchdb_prefix, room["num_id"]), body={
'_id': "_design/by_date",
'language': 'javascript',
"views": {
"by_date": {
"map": "function (doc) { emit(doc[\"date\"], doc); }"
}
}
})
self.db.request("PUT", "/%s_room_%s_messages/_design/highlight/" % (
self.couchdb_prefix, room["num_id"]), body={
'_id': "_design/user_by_message",
'language': 'javascript',
"views": {
"user_by_message": {
"map": "function (doc) { emit(doc[\"num_id\"], doc[\"user\"]); }"
}
}
})
return room["num_id"]
def find_room(self, num_id):
rooms = self.db.request("GET", "/%s_rooms/_design/"
"by_num_id/_view/by_num_id/" % self.couchdb_prefix)
s = [i for i in rooms['rows'] if i["key"] == num_id]
return self.db.request("GET", "/%s_rooms/%s" %
(self.couchdb_prefix, s[0]['id'])) if s else None
def get_n_rooms(self):
try:
return len(self.db.request("GET", ("/%s_rooms/_design/by_msg_count/"
"_view/by_msg_count/") % self.couchdb_prefix)["rows"])
except KeyError:
return 0
@cherrypy.expose
def post_message(self, room, message):
if cherrypy.request.method != "POST":
raise cherrypy.HTTPError(400)
user = self.get_user()
if not user:
raise cherrypy.HTTPError(402)
reply, highlight, prepend = None, [], None
if message.startswith("::") and " " in message:
reply = message.split()[0][2:]
r_msg = self.find_message(room, reply)
if r_msg:
message = message.replace("::" + reply, "")
highlight = [r_msg]
p = self.find_user(r_msg)
if p:
if p["name"]:
p = ("@" + p["name"].split()[0])
else:
p = "@User" + p["num_id"]
prepend = p
else:
reply = None
for i in [j[1:] for j in message.lower().split() if j.startswith("@")]:
u = list(self.find_user_by_name(''.join(j for j in i if j in
"abcdefghijklmnopqrstuvwxyz")))
if u:
highlight.extend(u)
self.update_user(user["_id"], {'last_seen_date': time.time()})
message = {
'raw': self.safe(message),
'markdown': self.onebox_html(self.markdown(self.onebox(message))),
'date': time.time(),
'user': user["num_id"],
'num_id': self.get_num_id(),
'stars': [],
'reply_to': reply,
'highlight': highlight,
'prepend': prepend,
}
self.inc_message_count(room)
self.do_get_messages.cache_clear()
self.db.request("POST", "/%s_room_%s_messages/" %
(self.couchdb_prefix, room), body=message)
def find_user_by_name(self, name):
users = self.db.request("GET", "/%s_users/_design/"
"by_name/_view/by_name/" % self.couchdb_prefix)
name = ''.join(i for i in name.lower() if i in
"abcdefghijklmnopqrstuvwxyz0123456789")
if "rows" in users:
for i in users["rows"]:
n, i = i["key"], i["value"]
if n and name.strip() == n.split()[0].lower().strip():
yield i
def find_message(self, room, num_id):
r = self.db.request("POST", ("/%s_room_%s_messages/_design/" +
"highlight/_view/user_by_message/") %
(self.couchdb_prefix, room))
if "rows" in r:
r = [i for i in r["rows"] if i["key"] == num_id]
return r[0]["value"] if r else None
def markdown(self, content):
return markdown.markdown(content, safe_mode="escape")
@jsonify
@cherrypy.expose
def read_messages(self, room="", last_atime=""):
if self.get_user():
self.active_users[room][self.get_user()["num_id"]] = time.time()
return self.do_get_messages(room, last_atime)
raise cherrypy.HTTPError(404)
@functools.lru_cache(100)
def do_get_messages(self, room, last_atime):
if float(last_atime) <= 1:
last_atime = time.time() - (60 * 60 * 24 * 7)
user = self.get_user()
if not user:
raise cherrypy.HTTPError(404)
room_data = self.find_room(room)
if room_data["type"] == "private" and user["num_id"] not in room_data["users"]:
raise cherrypy.HTTPError(404)
data = self.db.request("GET", ("/%s_room_%s_messages/"
"_design/messages/_view/by_date/?startkey=%s") % (
self.couchdb_prefix, room, json.dumps(float(last_atime))))
if "rows" in data:
data = [i['value'] for i in data["rows"]]
else:
data = []
data = list(sorted(data, key=lambda x: x["date"]))
if len(data) >= 50:
data = data[-50:]
for i in data:
i['user'] = self.find_user(i["user"])
del i["user"]["email"]
del i["user"]["identity"]
del i["user"]["website"]
del i["user"]["creation_date"]
del i["user"]["openid_provider"]
del i["user"]["country"]
del i["user"]["_rev"]
del i["user"]["messages"]
del i["user"]["last_login_date"]
del i["user"]["last_login_ip"]
del i["user"]["about_me"]
del i["user"]["owns_rooms"]
del i["user"]["birthday"]
del i["user"]["last_seen_date"]
i["user"]["name"] = self.safe(i["user"]["name"])
if data:
last_atime = max(i["date"] for i in data)
for i in data:
i["nice_date"] = time.strftime("%Y-%m-%d %H:%MZ", time.gmtime(i["date"]))
return {'last_atime': last_atime, 'data': data}
def inc_message_count(self, num_id):
room = self.find_room(num_id)
room["n_messages"] += 1
self.db.request("PUT", "/%s_rooms/%s" % (
self.couchdb_prefix, room["_id"]), body=room)
@cherrypy.expose
def star(self, room, message_id):
user = self.get_user()
if not user:
raise cherrypy.HTTPError(404)
d = self.db.request("GET", "/%s_room_%s_messages/%s" % (
self.couchdb_prefix, room, message_id))
if user["num_id"] not in d["stars"] and user["num_id"] != d["user"]:
d["stars"].append(user["num_id"])
self.get_starred.cache_clear()
self.db.request("PUT", "/%s_room_%s_messages/%s" % (
self.couchdb_prefix, room, message_id), body=d)
cherrypy.response.headers["Content-Type"] = "application/json"
return json.dumps(True).encode()
@cherrypy.expose
def unstar(self, room, message_id):
user = self.get_user()
if not user:
raise cherrypy.HTTPError(404)
d = self.db.request("GET", "/%s_room_%s_messages/%s" % (
self.couchdb_prefix, room, message_id))
if user["num_id"] in d["stars"] and user["num_id"] != d["user"]:
d["stars"].remove(user["num_id"])
self.get_starred.cache_clear()
self.db.request("PUT", "/%s_room_%s_messages/%s" % (
self.couchdb_prefix, room, message_id), body=d)
cherrypy.response.headers["Content-Type"] = "application/json"
return json.dumps(True).encode()
@jsonify
@cherrypy.expose
@functools.lru_cache(100)
def get_starred(self, room=""):
if self.get_user():
last_atime = time.time() - (60 * 60 * 24)
data = self.db.request("GET", ("/%s_room_%s_messages/"
"_design/messages/_view/by_date/?startkey=%s") % (
self.couchdb_prefix, room, json.dumps(float(last_atime))))
if "rows" in data:
data = [i['value'] for i in data["rows"]]
else:
data = []
for i in data:
i['user'] = self.find_user(i["user"])
del i["user"]["email"]
del i["user"]["identity"]
del i["user"]["website"]
del i["user"]["creation_date"]
del i["user"]["openid_provider"]
del i["user"]["country"]
del i["user"]["_rev"]
del i["user"]["messages"]
del i["user"]["last_login_date"]
del i["user"]["last_login_ip"]
del i["user"]["about_me"]
del i["user"]["owns_rooms"]
del i["user"]["birthday"]
del i["user"]["last_seen_date"]
i["user"]["name"] = self.safe(i["user"]["name"])
data = sorted(data, key=lambda x: len(x["stars"]), reverse=True)
data = list(i for i in data if i["stars"])[:10]
data = sorted(data, key=lambda x: x["date"], reverse=True)
cherrypy.response.headers["Content-Type"] = "application/json"
h = hashlib.md5("".join(i["_id"] + str(len(i["stars"]))
for i in data).encode()).hexdigest()
for i in data:
i["nice_date"] = time.strftime("%Y-%m-%d %H:%MZ", time.gmtime(i["date"]))
return {'last_atime': last_atime, 'data': data, 'hash': h}
raise cherrypy.HTTPError(404)
@jsonify
@cherrypy.expose
def get_user_list(self, room):
self.active_users[room] = {k: v for k, v in
self.active_users[room].items() if time.time() - v < 10}
r = list(self.find_user(i) for i in self.active_users[room].keys())
for i in r:
del i["email"]
del i["identity"]
del i["website"]
del i["creation_date"]
del i["openid_provider"]
del i["country"]
del i["_rev"]
del i["messages"]
del i["last_login_date"]
del i["last_login_ip"]
del i["about_me"]
del i["owns_rooms"]
del i["birthday"]
del i["last_seen_date"]
i["name"] = self.safe(i["name"])
data = sorted(r, key=lambda x: x["num_id"] or "")
h = hashlib.md5("".join(i["_id"]
for i in data).encode()).hexdigest()
return {"data": data, "hash": h}
@cherrypy.expose
def remove_user_from_room(self, room, num_id):
room_num_id = room
room = self.find_room(room)
user = self.get_user()
if user and user["_id"] == room["owner"]:
if num_id in room["users"]:
room["users"].remove(num_id)
self.db.request("PUT", "/%s_rooms/%s" % (
self.couchdb_prefix, room["_id"]), body=room)
self.do_get_messages.cache_clear()
raise cherrypy.HTTPRedirect("/rooms/%s" % room_num_id)
@jsonify
@cherrypy.expose
def add_user_to_room(self, room, user_url=""):
room_num_id = room
room = self.find_room(room)
user = self.get_user()
if user and user["_id"] == room["owner"]:
u = urllib.parse.urlparse(user_url).path.split("/")
if len(u) > 2 and u[1] == "users" and self.find_user(u[2]):
if u[2] not in room["users"]:
room["users"].append(u[2])
self.db.request("PUT", "/%s_rooms/%s" % (
self.couchdb_prefix, room["_id"]), body=room)
raise cherrypy.HTTPRedirect("/rooms/%s" % room_num_id)
| isc | -2,532,584,712,971,390,500 | 39.14791 | 89 | 0.493032 | false |
hpcloud-mon/tempest | tempest/api/volume/admin/test_volume_types_extra_specs_negative.py | 3 | 6184 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.volume import base
from tempest import test
class ExtraSpecsNegativeV2Test(base.BaseVolumeAdminTest):
@classmethod
def resource_setup(cls):
super(ExtraSpecsNegativeV2Test, cls).resource_setup()
vol_type_name = data_utils.rand_name('Volume-type')
cls.extra_specs = {"spec1": "val1"}
cls.volume_type = cls.volume_types_client.create_volume_type(
vol_type_name,
extra_specs=cls.extra_specs)
@classmethod
def resource_cleanup(cls):
cls.volume_types_client.delete_volume_type(cls.volume_type['id'])
super(ExtraSpecsNegativeV2Test, cls).resource_cleanup()
@test.attr(type='gate')
@test.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
def test_update_no_body(self):
# Should not update volume type extra specs with no body
extra_spec = {"spec1": "val2"}
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.update_volume_type_extra_specs,
self.volume_type['id'], extra_spec.keys()[0], None)
@test.attr(type='gate')
@test.idempotent_id('25e5a0ee-89b3-4c53-8310-236f76c75365')
def test_update_nonexistent_extra_spec_id(self):
# Should not update volume type extra specs with nonexistent id.
extra_spec = {"spec1": "val2"}
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.update_volume_type_extra_specs,
self.volume_type['id'], str(uuid.uuid4()),
extra_spec)
@test.attr(type='gate')
@test.idempotent_id('9bf7a657-b011-4aec-866d-81c496fbe5c8')
def test_update_none_extra_spec_id(self):
# Should not update volume type extra specs with none id.
extra_spec = {"spec1": "val2"}
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.update_volume_type_extra_specs,
self.volume_type['id'], None, extra_spec)
@test.attr(type='gate')
@test.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
def test_update_multiple_extra_spec(self):
# Should not update volume type extra specs with multiple specs as
# body.
extra_spec = {"spec1": "val2", 'spec2': 'val1'}
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.update_volume_type_extra_specs,
self.volume_type['id'], extra_spec.keys()[0],
extra_spec)
@test.attr(type='gate')
@test.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
def test_create_nonexistent_type_id(self):
# Should not create volume type extra spec for nonexistent volume
# type id.
extra_specs = {"spec2": "val1"}
self.assertRaises(
lib_exc.NotFound,
self.volume_types_client.create_volume_type_extra_specs,
str(uuid.uuid4()), extra_specs)
@test.attr(type='gate')
@test.idempotent_id('c821bdc8-43a4-4bf4-86c8-82f3858d5f7d')
def test_create_none_body(self):
# Should not create volume type extra spec for none POST body.
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.create_volume_type_extra_specs,
self.volume_type['id'], None)
@test.attr(type='gate')
@test.idempotent_id('bc772c71-1ed4-4716-b945-8b5ed0f15e87')
def test_create_invalid_body(self):
# Should not create volume type extra spec for invalid POST body.
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.create_volume_type_extra_specs,
self.volume_type['id'], ['invalid'])
@test.attr(type='gate')
@test.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
def test_delete_nonexistent_volume_type_id(self):
# Should not delete volume type extra spec for nonexistent
# type id.
extra_specs = {"spec1": "val1"}
self.assertRaises(
lib_exc.NotFound,
self.volume_types_client.delete_volume_type_extra_specs,
str(uuid.uuid4()), extra_specs.keys()[0])
@test.attr(type='gate')
@test.idempotent_id('dee5cf0c-cdd6-4353-b70c-e847050d71fb')
def test_list_nonexistent_volume_type_id(self):
# Should not list volume type extra spec for nonexistent type id.
self.assertRaises(
lib_exc.NotFound,
self.volume_types_client.list_volume_types_extra_specs,
str(uuid.uuid4()))
@test.attr(type='gate')
@test.idempotent_id('9f402cbd-1838-4eb4-9554-126a6b1908c9')
def test_get_nonexistent_volume_type_id(self):
# Should not get volume type extra spec for nonexistent type id.
extra_specs = {"spec1": "val1"}
self.assertRaises(
lib_exc.NotFound,
self.volume_types_client.show_volume_type_extra_specs,
str(uuid.uuid4()), extra_specs.keys()[0])
@test.attr(type='gate')
@test.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
def test_get_nonexistent_extra_spec_id(self):
# Should not get volume type extra spec for nonexistent extra spec
# id.
self.assertRaises(
lib_exc.NotFound,
self.volume_types_client.show_volume_type_extra_specs,
self.volume_type['id'], str(uuid.uuid4()))
class ExtraSpecsNegativeV1Test(ExtraSpecsNegativeV2Test):
_api_version = 1
| apache-2.0 | 2,315,887,284,898,949,000 | 38.896774 | 78 | 0.647154 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/tensor/tests/test_blas_c.py | 1 | 12000 | from __future__ import absolute_import, print_function, division
import sys
import numpy
from unittest import TestCase
from nose.plugins.skip import SkipTest
import theano
import theano.tensor as tensor
from theano.tensor.blas_c import CGer
from theano.tensor.blas_scipy import ScipyGer
from theano.tensor.blas import Ger
from theano.tensor.blas_c import CGemv
from theano.tensor.blas import Gemv
from theano.tensor.blas_c import check_force_gemv_init
from theano.tests import unittest_tools
from theano.tests.unittest_tools import TestOptimizationMixin
from theano.tensor.tests.test_blas import BaseGemv, TestBlasStrides
mode_blas_opt = theano.compile.get_default_mode().including(
'BlasOpt', 'specialize', 'InplaceBlasOpt', 'c_blas')
def skip_if_blas_ldflags_empty(*functions_detected):
if theano.config.blas.ldflags == "":
functions_string = ""
if functions_detected:
functions_string = " (at least " + (", ".join(functions_detected)) + ")"
raise SkipTest("This test is useful only when Theano can access to BLAS functions" + functions_string + " other than [sd]gemm_.")
class TestCGer(TestCase, TestOptimizationMixin):
def setUp(self, dtype='float64'):
        # These tests can run even when theano.config.blas.ldflags is empty.
self.dtype = dtype
self.mode = theano.compile.get_default_mode().including('fast_run')
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.Aval = numpy.ones((2, 3), dtype=dtype)
self.xval = numpy.asarray([1, 2], dtype=dtype)
self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
def function(self, inputs, outputs):
return theano.function(inputs, outputs,
mode=self.mode,
# allow_inplace=True,
)
def run_f(self, f):
f(self.Aval, self.xval, self.yval)
f(self.Aval[::-1, ::-1], self.xval, self.yval)
def b(self, bval):
return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype))
def test_eq(self):
self.assertTrue(CGer(True) == CGer(True))
self.assertTrue(CGer(False) == CGer(False))
self.assertTrue(CGer(False) != CGer(True))
self.assertTrue(CGer(True) != ScipyGer(True))
self.assertTrue(CGer(False) != ScipyGer(False))
self.assertTrue(CGer(True) != Ger(True))
self.assertTrue(CGer(False) != Ger(False))
# assert that eq works for non-CGer instances
self.assertTrue(CGer(False) is not None)
self.assertTrue(CGer(True) is not None)
def test_hash(self):
self.assertTrue(hash(CGer(True)) == hash(CGer(True)))
self.assertTrue(hash(CGer(False)) == hash(CGer(False)))
self.assertTrue(hash(CGer(False)) != hash(CGer(True)))
def test_optimization_pipeline(self):
skip_if_blas_ldflags_empty()
f = self.function([self.x, self.y], tensor.outer(self.x, self.y))
self.assertFunctionContains(f, CGer(destructive=True))
f(self.xval, self.yval) # DebugMode tests correctness
def test_optimization_pipeline_float(self):
skip_if_blas_ldflags_empty()
self.setUp('float32')
f = self.function([self.x, self.y], tensor.outer(self.x, self.y))
self.assertFunctionContains(f, CGer(destructive=True))
f(self.xval, self.yval) # DebugMode tests correctness
def test_int_fails(self):
self.setUp('int32')
f = self.function([self.x, self.y], tensor.outer(self.x, self.y))
self.assertFunctionContains0(f, CGer(destructive=True))
self.assertFunctionContains0(f, CGer(destructive=False))
def test_A_plus_outer(self):
skip_if_blas_ldflags_empty()
f = self.function([self.A, self.x, self.y],
self.A + tensor.outer(self.x, self.y))
self.assertFunctionContains(f, CGer(destructive=False))
self.run_f(f) # DebugMode tests correctness
def test_A_plus_scaled_outer(self):
skip_if_blas_ldflags_empty()
f = self.function([self.A, self.x, self.y],
self.A + 0.1 * tensor.outer(self.x, self.y))
self.assertFunctionContains(f, CGer(destructive=False))
self.run_f(f) # DebugMode tests correctness
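# The CGemv tests below check that theano.dot on matrix/vector arguments is
# rewritten into the C implementation (CGemv), that a zero coefficient on the
# output vector discards any NaNs already in it, and that negatively strided
# and in-place updates still match numpy.dot.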
class TestCGemv(TestCase, TestOptimizationMixin):
"""Tests of CGemv specifically.
Generic tests of Gemv-compatibility, including both dtypes are
done below in TestCGemvFloat32 and TestCGemvFloat64
"""
def setUp(self, dtype='float64'):
        # These tests can run even when theano.config.blas.ldflags is empty.
self.dtype = dtype
self.mode = theano.compile.get_default_mode().including('fast_run')
# matrix
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.Aval = numpy.ones((2, 3), dtype=dtype)
# vector
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.xval = numpy.asarray([1, 2], dtype=dtype)
self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
# scalar
self.a = tensor.tensor(dtype=dtype, broadcastable=())
def test_nan_beta_0(self):
mode = self.mode.including()
mode.check_isfinite = False
f = theano.function([self.A, self.x, self.y, self.a],
self.a*self.y + theano.dot(self.A, self.x),
mode=mode)
Aval = numpy.ones((3, 1), dtype=self.dtype)
xval = numpy.ones((1,), dtype=self.dtype)
yval = float('NaN') * numpy.ones((3,), dtype=self.dtype)
zval = f(Aval, xval, yval, 0)
assert not numpy.isnan(zval).any()
def test_optimizations_vm(self):
skip_if_blas_ldflags_empty()
''' Test vector dot matrix '''
f = theano.function([self.x, self.A],
theano.dot(self.x, self.A),
mode=self.mode)
# Assert that the dot was optimized somehow
self.assertFunctionContains0(f, tensor.dot)
self.assertFunctionContains1(
f,
CGemv(inplace=True)
)
# Assert they produce the same output
assert numpy.allclose(f(self.xval, self.Aval),
numpy.dot(self.xval, self.Aval))
# Test with negative strides on 2 dims
assert numpy.allclose(f(self.xval, self.Aval[::-1, ::-1]),
numpy.dot(self.xval, self.Aval[::-1, ::-1]))
def test_optimizations_mv(self):
skip_if_blas_ldflags_empty()
''' Test matrix dot vector '''
f = theano.function([self.A, self.y],
theano.dot(self.A, self.y),
mode=self.mode)
# Assert that the dot was optimized somehow
self.assertFunctionContains0(f, tensor.dot)
self.assertFunctionContains1(
f,
CGemv(inplace=True)
)
# Assert they produce the same output
assert numpy.allclose(f(self.Aval, self.yval),
numpy.dot(self.Aval, self.yval))
# Test with negative strides on 2 dims
assert numpy.allclose(f(self.Aval[::-1, ::-1], self.yval),
numpy.dot(self.Aval[::-1, ::-1], self.yval))
def test_force_gemv_init(self):
if check_force_gemv_init():
sys.stderr.write(
"WARNING: The current BLAS requires Theano to initialize"
+ " memory for some GEMV calls which will result in a minor"
+ " degradation in performance for such calls."
)
def t_gemv1(self, m_shp):
''' test vector2 + dot(matrix, vector1) '''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(numpy.array(rng.uniform(size=(m_shp[1],)),
dtype='float32'))
v2_orig = numpy.array(rng.uniform(size=(m_shp[0],)), dtype='float32')
v2 = theano.shared(v2_orig)
m = theano.shared(numpy.array(rng.uniform(size=m_shp),
dtype='float32'))
f = theano.function([], v2 + tensor.dot(m, v1),
mode=self.mode)
# Assert they produce the same output
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = [n.op for n in f.maker.fgraph.toposort()]
assert topo == [CGemv(inplace=False)], topo
# test the inplace version
g = theano.function([], [],
updates=[(v2, v2 + theano.dot(m, v1))],
mode=self.mode)
# Assert they produce the same output
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = [n.op for n in g.maker.fgraph.toposort()]
assert topo == [CGemv(inplace=True)]
# Do the same tests with a matrix with strides in both dimensions
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
v2.set_value(v2_orig)
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
def test_gemv1(self):
skip_if_blas_ldflags_empty()
self.t_gemv1((3, 2))
self.t_gemv1((1, 2))
self.t_gemv1((0, 2))
self.t_gemv1((3, 1))
self.t_gemv1((3, 0))
self.t_gemv1((1, 0))
self.t_gemv1((0, 1))
self.t_gemv1((0, 0))
def test_gemv_dimensions(self, dtype='float32'):
alpha = theano.shared(theano._asarray(1.0, dtype=dtype),
name='alpha')
beta = theano.shared(theano._asarray(1.0, dtype=dtype),
name='beta')
z = beta * self.y + alpha * tensor.dot(self.A, self.x)
f = theano.function([self.A, self.x, self.y], z,
mode=self.mode)
# Matrix value
A_val = numpy.ones((5, 3), dtype=dtype)
# Different vector length
ones_3 = numpy.ones(3, dtype=dtype)
ones_4 = numpy.ones(4, dtype=dtype)
ones_5 = numpy.ones(5, dtype=dtype)
ones_6 = numpy.ones(6, dtype=dtype)
f(A_val, ones_3, ones_5)
f(A_val[::-1, ::-1], ones_3, ones_5)
self.assertRaises(ValueError, f, A_val, ones_4, ones_5)
self.assertRaises(ValueError, f, A_val, ones_3, ones_6)
self.assertRaises(ValueError, f, A_val, ones_4, ones_6)
def test_multiple_inplace(self):
skip_if_blas_ldflags_empty()
x = tensor.dmatrix('x')
y = tensor.dvector('y')
z = tensor.dvector('z')
f = theano.function([x, y, z],
[tensor.dot(y, x), tensor.dot(z,x)],
mode=mode_blas_opt)
vx = numpy.random.rand(3, 3)
vy = numpy.random.rand(3)
vz = numpy.random.rand(3)
out = f(vx, vy, vz)
assert numpy.allclose(out[0], numpy.dot(vy, vx))
assert numpy.allclose(out[1], numpy.dot(vz, vx))
assert len([n for n in f.maker.fgraph.apply_nodes
if isinstance(n.op, tensor.AllocEmpty)]) == 2
class TestCGemvFloat32(TestCase, BaseGemv, TestOptimizationMixin):
mode = mode_blas_opt
dtype = 'float32'
gemv = CGemv(inplace=False)
gemv_inplace = CGemv(inplace=True)
def setUp(self):
skip_if_blas_ldflags_empty()
class TestCGemvFloat64(TestCase, BaseGemv, TestOptimizationMixin):
mode = mode_blas_opt
dtype = 'float64'
gemv = CGemv(inplace=False)
gemv_inplace = CGemv(inplace=True)
def setUp(self):
skip_if_blas_ldflags_empty()
class TestBlasStridesC(TestBlasStrides):
mode = mode_blas_opt
| agpl-3.0 | 3,789,379,958,906,507,000 | 36.5 | 137 | 0.594 | false |
gugod/searx | searx/engines/generalfile.py | 10 | 1463 | """
General Files (Files)
@website http://www.general-files.org
@provide-api no (nothing found)
@using-api no (because nothing found)
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content
@todo detect torrents?
"""
from lxml import html
# engine dependent config
categories = ['files']
paging = True
# search-url
base_url = 'http://www.general-file.com'
search_url = base_url + '/files-{letter}/{query}/{pageno}'
# specific xpath variables
result_xpath = '//table[@class="block-file"]'
title_xpath = './/h2/a//text()'
url_xpath = './/h2/a/@href'
content_xpath = './/p//text()'
# do search-request
def request(query, params):
params['url'] = search_url.format(query=query,
letter=query[0],
pageno=params['pageno'])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
# parse results
for result in dom.xpath(result_xpath):
url = result.xpath(url_xpath)[0]
# skip fast download links
if not url.startswith('/'):
continue
# append result
results.append({'url': base_url + url,
'title': ''.join(result.xpath(title_xpath)),
'content': ''.join(result.xpath(content_xpath))})
# return results
return results
| agpl-3.0 | -6,921,181,586,945,908,000 | 22.596774 | 73 | 0.583049 | false |
rudhir-upretee/Sumo17_With_Netsim | tools/assign/one-shot.py | 1 | 8737 | #!/usr/bin/env python
"""
@file one-shot.py
@author Daniel Krajzewicz
@author Jakob Erdmann
@author Yun-Pang Wang
@author Michael Behrisch
@date 2008-03-10
@version $Id: one-shot.py 13811 2013-05-01 20:31:43Z behrisch $
This script does multiple sumo runs with different rerouting intervals.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from __future__ import print_function
import os, sys, subprocess
from datetime import datetime
from optparse import OptionParser
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'lib'))
from testUtil import checkBinary
def call(command, log):
print("-" * 79, file=log)
print(command, file=log)
retCode = subprocess.call(command, stdout=log, stderr=log)
if retCode != 0:
print("Execution of %s failed. Look into %s for details." % (command, log.name), file=sys.stderr)
sys.exit(retCode)
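# Writes one_shot_<step>.sumocfg plus the matching edgeData dump .add.xml for a
# single rerouting period <step>; the generated config enables the rerouting
# device on every vehicle with that period.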
def writeSUMOConf(step, options, files):
fd = open("one_shot_" + str(step) + ".sumocfg", "w")
print("""<configuration>
<files>
<net-file value="%s"/>
<route-files value="%s"/>
<vehroutes value="vehroutes_%s.xml"/>""" % (options.net, files, step), file=fd)
if not options.noSummary:
print(' <summary value="summary_%s.xml"/>' % step, file=fd)
if not options.noTripinfo:
print(' <tripinfo value="tripinfo_%s.xml"/>' % step, file=fd)
if options.weightfiles:
print(' <weight-files value="%s"/>' % options.weightfiles, file=fd)
add = 'dump_%s.add.xml' % step
if options.costmodifier != 'None':
add = '%s_dump_%s.add.xml' % (options.costmodifier, step)
if options.additional:
add += "," + options.additional
print(""" <additional-files value="%s"/>
</files>
<process>
<begin value="%s"/>
<route-steps value="%s"/>""" % (add, options.begin, options.routeSteps), file=fd)
if options.end:
print(' <end value="%s"/>' % options.end, file=fd)
if options.mesosim:
print(' <mesosim value="True"/>', file=fd)
if options.routingalgorithm:
print(' <routing-algorithm value="%s"/>' % options.routingalgorithm, file=fd)
print(""" <device.rerouting.probability value="1"/>
<device.rerouting.period value="%s"/>
<device.rerouting.adaptation-interval value="%s"/>
<device.rerouting.with-taz value="%s"/>
<device.rerouting.explicit value="%s"/>
<vehroute-output.last-route value="%s"/>
<vehroute-output.exit-times value="%s"/>
<vehroute-output.sorted value="%s"/>
</process>
<reports>
<verbose value="True"/>
<no-warnings value="%s"/>
</reports>
</configuration>""" % (step, options.updateInterval, options.withtaz, options.reroutingexplicit, options.lastRoutes, options.withexittime, options.routesorted, not options.withWarnings), file=fd)
fd.close()
if options.costmodifier != 'None':
fd = open("%s_dump_%s.add.xml" % (options.costmodifier, step), "w")
print("""<a>
<edgeData id="%s_dump_%s_%s" freq="%s" file="%s_dump_%s_%s.xml" excludeEmpty="true"/>
</a>""" % (options.costmodifier, step, options.aggregation, options.aggregation, options.costmodifier, step, options.aggregation), file=fd)
fd.close()
else:
fd = open("dump_%s.add.xml" % step, "w")
print("""<a>
<edgeData id="dump_%s_%s" freq="%s" file="dump_%s_%s.xml" excludeEmpty="true"/>
</a>""" % (step, options.aggregation, options.aggregation, step, options.aggregation), file=fd)
fd.close()
optParser = OptionParser()
optParser.add_option("-W", "--with-warnings", action="store_true", dest="withWarnings",
default=False, help="enables warnings")
optParser.add_option("-n", "--net-file", dest="net",
help="SUMO network (mandatory)", metavar="FILE")
optParser.add_option("-t", "--trips", dest="trips",
help="trips in step 0", metavar="FILE")
optParser.add_option("-b", "--begin", dest="begin",
type="int", default=0, help="Set simulation/routing begin")
optParser.add_option("-e", "--end", dest="end",
type="int", help="Set simulation/routing end")
optParser.add_option("-R", "--route-steps", dest="routeSteps",
type="int", default=200, help="Set simulation route steps")
optParser.add_option("-a", "--aggregation", dest="aggregation",
type="int", default=900, help="Set main weights aggregation period")
optParser.add_option("-f", "--frequencies", dest="frequencies",
default="-1,1800,300,15", help="Set the frequencies to iterate over")
optParser.add_option("-i", "--adaptation-interval", dest="updateInterval",
type="int", default=1, help="Set edge weight adaptation interval")
optParser.add_option("-E", "--disable-summary", "--disable-emissions", action="store_true", dest="noSummary",
default=False, help="No summaries are written by the simulation")
optParser.add_option("-T", "--disable-tripinfos", action="store_true", dest="noTripinfo",
default=False, help="No tripinfos are written by the simulation")
optParser.add_option("-m", "--mesosim", action="store_true", dest="mesosim",
default=False, help="Whether mesosim shall be used")
optParser.add_option("-w", "--with-taz", action="store_true", dest="withtaz",
default=False, help="Whether districts shall be used")
optParser.add_option("-+", "--additional", dest="additional",
default="", help="Additional files")
optParser.add_option("-L", "--lastRoutes", action="store_true", dest="lastRoutes",
default=False, help="only save the last routes in the vehroute-output")
optParser.add_option("-F", "--weight-files", dest="weightfiles",
help="Load edge/lane weights from FILE", metavar="FILE")
optParser.add_option("-A", "--routing-algorithm", dest="routingalgorithm", type="choice",
choices=('dijkstra', 'astar'),
default="astar", help="type of routing algorithm [default: %default]")
optParser.add_option("-r", "--rerouting-explicit", dest="reroutingexplicit", type="string",
default = "", help="define the ids of the vehicles that should be re-routed.")
optParser.add_option("-x", "--with-exittime", action="store_true", dest="withexittime",
default= False, help="Write the exit times for all edges")
optParser.add_option("-s", "--route-sorted", action="store_true", dest="routesorted",
default= False, help="sorts the output by departure time")
optParser.add_option("-p", "--path", dest="path",
default=os.environ.get("SUMO_BINDIR", ""), help="Path to binaries")
optParser.add_option("--cost-modifier", dest="costmodifier", type="choice",
choices=('grohnde', 'isar', 'None'),
default='None', help="Whether to modify link travel costs of the given routes")
(options, args) = optParser.parse_args()
sumo = "sumo"
if options.mesosim:
sumo = "meso"
if options.path:
if os.path.isfile(options.path):
sumoBinary = options.path
else:
sumoBinary = checkBinary(sumo, options.path)
else:
sumoBinary = checkBinary(sumo)
if options.costmodifier != 'None':
pyPath = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.append(os.path.join(pyPath, "..", "..", "..", "..","..", "tools", "kkwSim"))
from kkwCostModifier import costModifier
print('use the cost modifier')
log = open("one_shot-log.txt", "w")
starttime = datetime.now()
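# One full SUMO run per rerouting period listed in --frequencies; each run gets
# its own sumocfg, dump file and log section.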
for step in options.frequencies.split(","):
step = int(step)
print("> Running simulation with update frequency %s" % step)
btime = datetime.now()
print(">> Begin time %s" % btime)
if options.costmodifier != 'None':
currentDir = os.getcwd()
print(options.costmodifier)
outputfile = '%s_weights_%s.xml' % (options.costmodifier, step)
costModifier(outputfile, step, "dump", options.aggregation, currentDir, options.costmodifier, 'one-shot')
writeSUMOConf(step, options, options.trips)
call([sumoBinary, "-c", "one_shot_%s.sumocfg" % step], log)
etime = datetime.now()
print(">> End time %s" % etime)
print("< Step %s ended (duration: %s)" % (step, etime-btime))
print("------------------\n")
print("one-shot ended (duration: %s)" % (datetime.now() - starttime))
log.close()
| gpl-3.0 | -3,441,668,568,745,313,000 | 47.810056 | 195 | 0.617145 | false |
sjl/flask-csrf | setup.py | 1 | 1143 | """
flask-csrf
----------
A small Flask extension for adding CSRF protection.
Links
`````
* `documentation <http://sjl.bitbucket.org/flask-csrf/>`_
* `development version
<http://bitbucket.org/sjl/flask-csrf/get/tip.gz#egg=flask-csrf-dev`_
"""
from setuptools import setup
setup(
name='flask-csrf',
version='0.9.2',
url='http://sjl.bitbucket.org/flask-csrf/',
license='MIT',
author='Steve Losh',
author_email='[email protected]',
description='A small Flask extension for adding CSRF protection.',
long_description=__doc__,
packages=['flaskext'],
namespace_packages=['flaskext'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask>0.1'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| mit | -2,995,481,810,937,407,000 | 24.4 | 70 | 0.619423 | false |
saullocastro/pyNastran | pyNastran/gui/vtk_examples/vtk_cutter.py | 4 | 1603 | #!/usr/bin/env python
# kills the program when you hit Ctrl+C from the command line
# doesn't save the current state as presumably there's been an error
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
# A simple script to demonstrate the vtkCutter function
import vtk
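# Pipeline: vtkCubeSource -> vtkPlane (implicit function) -> vtkCutter, then one
# mapper/actor for the cut contour and one for the semi-transparent cube, both
# added to a single renderer/interactor.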
#Create a cube
cube = vtk.vtkCubeSource()
cube.SetXLength(40)
cube.SetYLength(30)
cube.SetZLength(20)
cube_mapper = vtk.vtkPolyDataMapper()
cube_mapper.SetInputConnection(cube.GetOutputPort())
# Create a plane to cut; here it cuts in the XZ direction (XZ normal=(1,0,0); XY=(0,0,1); YZ=(0,1,0))
plane = vtk.vtkPlane()
plane.SetOrigin(10, 0, 0)
plane.SetNormal(1, 0, 0)
#create cutter
cutter = vtk.vtkCutter()
cutter.SetCutFunction(plane)
cutter.SetInputConnection(cube.GetOutputPort())
cutter.Update()
cutter_mapper = vtk.vtkPolyDataMapper()
cutter_mapper.SetInputConnection(cutter.GetOutputPort())
#create plane actor
plane_actor = vtk.vtkActor()
plane_actor.GetProperty().SetColor(1.0, 1, 0)
plane_actor.GetProperty().SetLineWidth(2)
plane_actor.SetMapper(cutter_mapper)
#create cube actor
cube_actor = vtk.vtkActor()
cube_actor.GetProperty().SetColor(0.5, 1, 0.5)
cube_actor.GetProperty().SetOpacity(0.5)
cube_actor.SetMapper(cube_mapper)
#create renderers and add actors of plane and cube
ren = vtk.vtkRenderer()
ren.AddActor(plane_actor)
ren.AddActor(cube_actor)
#Add renderer to renderwindow and render
ren_win = vtk.vtkRenderWindow()
ren_win.AddRenderer(ren)
ren_win.SetSize(600, 600)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(ren_win)
ren.SetBackground(0, 0, 0)
ren_win.Render()
iren.Start()
| lgpl-3.0 | 3,986,643,517,945,188,000 | 27.122807 | 98 | 0.769807 | false |
daxxi13/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/addanime.py | 30 | 3025 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
)
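# add-anime.net fronts its pages with an anti-bot check: a first request may
# return HTTP 503 whose body embeds a form with a 'jschl_vc' token and a small
# arithmetic challenge; _real_extract solves it (result plus the hostname
# length), confirms via that form's action URL, then re-fetches the page.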
class AddAnimeIE(InfoExtractor):
_VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<id>[\w_]+)(?:.*)'
_TEST = {
'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
'md5': '72954ea10bc979ab5e2eb288b21425a0',
'info_dict': {
'id': '24MR3YO5SAS9',
'ext': 'mp4',
'description': 'One Piece 606',
'title': 'One Piece 606',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
try:
webpage = self._download_webpage(url, video_id)
except ExtractorError as ee:
if not isinstance(ee.cause, compat_HTTPError) or \
ee.cause.code != 503:
raise
redir_webpage = ee.cause.read().decode('utf-8')
action = self._search_regex(
r'<form id="challenge-form" action="([^"]+)"',
redir_webpage, 'Redirect form')
vc = self._search_regex(
r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>',
redir_webpage, 'redirect vc value')
av = re.search(
r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
redir_webpage)
if av is None:
raise ExtractorError('Cannot find redirect math task')
av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
parsed_url = compat_urllib_parse_urlparse(url)
av_val = av_res + len(parsed_url.netloc)
confirm_url = (
parsed_url.scheme + '://' + parsed_url.netloc +
action + '?' +
compat_urllib_parse.urlencode({
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
self._download_webpage(
confirm_url, video_id,
note='Confirming after redirect')
webpage = self._download_webpage(url, video_id)
formats = []
for format_id in ('normal', 'hq'):
rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
            video_url = self._search_regex(rex, webpage, 'video file URL',
fatal=False)
if not video_url:
continue
formats.append({
'format_id': format_id,
'url': video_url,
})
self._sort_formats(formats)
video_title = self._og_search_title(webpage)
video_description = self._og_search_description(webpage)
return {
'_type': 'video',
'id': video_id,
'formats': formats,
'title': video_title,
'description': video_description
}
| gpl-3.0 | 7,868,871,040,285,022,000 | 33.770115 | 100 | 0.49686 | false |
mcloudv/fuel-ostf | fuel_health/tests/sanity/test_sanity_sahara.py | 1 | 7295 | # Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_health.common.utils.data_utils import rand_name
from fuel_health import saharamanager
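# Shared fixture: the base class builds a one-node node-group template and a
# single-group cluster template; plugin-specific subclasses only override
# _plugin_name, _hadoop_version and _node_processes.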
class SaharaTemplatesTest(saharamanager.SaharaTestsManager):
_plugin_name = 'An unknown plugin name'
_hadoop_version = 'An unknown Hadoop version'
_node_processes = 'An unknown list of processes'
def setUp(self):
super(SaharaTemplatesTest, self).setUp()
flavor_id = self.create_flavor()
self.ng_template = {
'name': rand_name('sahara-ng-template-'),
'plugin_name': self._plugin_name,
'hadoop_version': self._hadoop_version,
'flavor_id': flavor_id,
'node_processes': self._node_processes,
'description': 'Test node group template'
}
self.cl_template = {
'name': rand_name('sahara-cl-template-'),
'plugin_name': self._plugin_name,
'hadoop_version': self._hadoop_version,
'node_groups': [
{
'name': 'all-in-one',
'flavor_id': flavor_id,
'node_processes': self._node_processes,
'count': 1
}
],
'description': 'Test cluster template'
}
self.client = self.sahara_client
class VanillaTwoTemplatesTest(SaharaTemplatesTest):
_plugin_name = 'vanilla'
_hadoop_version = '2.4.1'
_node_processes = ['resourcemanager', 'namenode', 'secondarynamenode',
'oozie', 'historyserver', 'nodemanager', 'datanode']
def test_vanilla_two_templates(self):
"""Sahara test for checking CRUD operations on Vanilla2 templates
Target component: Sahara
Scenario:
1. Create a simple node group template
2. Get the node group template
3. List node group templates
4. Delete the node group template
5. Create a simple cluster template
6. Get the cluster template
7. List cluster templates
8. Delete the cluster template
Duration: 80 s.
Available since release: 2014.2-6.1
Deployment tags: Sahara
"""
fail_msg = 'Failed to create node group template.'
ng_template = self.verify(10, self.client.node_group_templates.create,
1, fail_msg, 'creating node group template',
**self.ng_template)
fail_msg = 'Failed to get node group template.'
self.verify(10, self.client.node_group_templates.get, 2,
fail_msg, 'getting node group template', ng_template.id)
fail_msg = 'Failed to list node group templates.'
self.verify(10, self.client.node_group_templates.list, 3,
fail_msg, 'listing node group templates')
fail_msg = 'Failed to delete node group template.'
self.verify(10, self.client.node_group_templates.delete, 4,
fail_msg, 'deleting node group template', ng_template.id)
fail_msg = 'Failed to create cluster template.'
cl_template = self.verify(10, self.client.cluster_templates.create, 5,
fail_msg, 'creating cluster template',
**self.cl_template)
fail_msg = 'Failed to get cluster template.'
self.verify(10, self.sahara_client.cluster_templates.get, 6,
fail_msg, 'getting cluster template', cl_template.id)
fail_msg = 'Failed to list cluster templates.'
self.verify(10, self.sahara_client.cluster_templates.list, 7,
fail_msg, 'listing cluster templates')
fail_msg = 'Failed to delete cluster template.'
self.verify(10, self.sahara_client.cluster_templates.delete, 8,
fail_msg, 'deleting cluster template', cl_template.id)
class HDPTwoTemplatesTest(SaharaTemplatesTest):
_plugin_name = 'hdp'
_hadoop_version = '2.0.6'
_node_processes = ['NODEMANAGER', 'DATANODE', 'HDFS_CLIENT', 'PIG',
'ZOOKEEPER_CLIENT', 'MAPREDUCE2_CLIENT', 'YARN_CLIENT',
'OOZIE_CLIENT', 'RESOURCEMANAGER', 'OOZIE_SERVER',
'SECONDARY_NAMENODE', 'AMBARI_SERVER', 'NAMENODE',
'ZOOKEEPER_SERVER', 'HISTORYSERVER', 'GANGLIA_SERVER']
def test_hdp_two_templates(self):
"""Sahara test for checking CRUD operations on HDP2 templates
Target component: Sahara
Scenario:
1. Create a simple node group template
2. Get the node group template
3. List node group templates
4. Delete the node group template
5. Create a simple cluster template
6. Get the cluster template
7. List cluster templates
8. Delete the cluster template
Duration: 80 s.
Available since release: 2014.2-6.1
Deployment tags: Sahara
"""
fail_msg = 'Failed to create node group template.'
ng_template = self.verify(10, self.client.node_group_templates.create,
1, fail_msg, 'creating node group template',
**self.ng_template)
fail_msg = 'Failed to get node group template.'
self.verify(10, self.client.node_group_templates.get, 2,
fail_msg, 'getting node group template', ng_template.id)
fail_msg = 'Failed to list node group templates.'
self.verify(10, self.client.node_group_templates.list, 3,
fail_msg, 'listing node group templates')
fail_msg = 'Failed to delete node group template.'
self.verify(10, self.client.node_group_templates.delete, 4,
fail_msg, 'deleting node group template', ng_template.id)
fail_msg = 'Failed to create cluster template.'
cl_template = self.verify(10, self.client.cluster_templates.create, 5,
fail_msg, 'creating cluster template',
**self.cl_template)
fail_msg = 'Failed to get cluster template.'
self.verify(10, self.sahara_client.cluster_templates.get, 6,
fail_msg, 'getting cluster template', cl_template.id)
fail_msg = 'Failed to list cluster templates.'
self.verify(10, self.sahara_client.cluster_templates.list, 7,
fail_msg, 'listing cluster templates')
fail_msg = 'Failed to delete cluster template.'
self.verify(10, self.sahara_client.cluster_templates.delete, 8,
fail_msg, 'deleting cluster template', cl_template.id)
| apache-2.0 | 8,312,028,947,680,183,000 | 41.16763 | 78 | 0.595065 | false |
DoubleNegativeVisualEffects/gaffer | python/GafferTest/ArrayPlugTest.py | 5 | 10757 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import gc
import IECore
import Gaffer
import GafferTest
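# ArrayPlug behaviour under test: connecting the last free element grows the
# array by one (up to maxSize), disconnecting trailing elements shrinks it back
# down to minSize, and disconnecting an intermediate element leaves a gap in
# place.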
class ArrayPlugTest( unittest.TestCase ) :
def test( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
self.assertTrue( n["in"]["e1"].isSame( n["in"][0] ) )
n["in"][0].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 2 )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" in n["in"] )
n["in"][0].setInput( None )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
def testConnectionGaps( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
n["in"][0].setInput( a["sum"] )
n["in"][1].setInput( a["sum"] )
n["in"][2].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
n["in"][1].setInput( None )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput() is None )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( None )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].isSame( s["n"]["in"][0] ) )
self.assertTrue( s["n"]["in"]["e2"].isSame( s["n"]["in"][1] ) )
self.assertTrue( s["n"]["in"]["e3"].isSame( s["n"]["in"][2] ) )
self.assertTrue( s["n"]["in"]["e4"].isSame( s["n"]["in"][3] ) )
self.assertTrue( s["n"]["in"]["e1"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e2"].getInput() is None )
self.assertTrue( s["n"]["in"]["e3"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e4"].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
def testMaximumInputs( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
# connect all inputs
for i in range( 0, 6 ) :
n["in"][i].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
# check that removing the one before the last
# leaves the last in place.
n["in"][4].setInput( None )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
if i != 4 :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
else :
self.assertTrue( n["in"][i].getInput() is None )
def testMakeConnectionAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
with Gaffer.UndoContext( s ) :
s["n"]["in"][0].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
s.redo()
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( "in" in s["n"] )
self.assertFalse( "in1" in s["n"] )
def testMinimumInputs( self ) :
a = GafferTest.AddNode()
n = Gaffer.Node()
n["in"] = Gaffer.ArrayPlug( "in", element = Gaffer.IntPlug( "e1" ), minSize=3 )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the middle input shouldn't create
# any new inputs, because there is still one free on the end
n["in"]["e2"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the last input should create a new
# one - there should always be one free input on the
# end (until the maximum is reached).
n["in"]["e3"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
n["in"]["e3"].setInput( None )
self.assertEqual( len( n["in"] ), 3 )
def testDeleteAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"]["e1"].setInput( s["a"]["sum"] )
s["n"]["in"]["e2"].setInput( s["a"]["sum"] )
s["n"]["in"]["e3"].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["n"] ] ) )
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
def testDeleteInputNodeAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
n = s["n"]
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["a"] ] ) )
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
def testFixedLengthDynamic( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = Gaffer.Node()
s["n"]["a"] = Gaffer.ArrayPlug( "a", element = Gaffer.IntPlug(), minSize = 4, maxSize = 4, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["a"][1].setInput( s["a"]["sum"] )
s["n"]["a"][2].setInput( s["a"]["sum"] )
self.assertEqual( s["n"]["a"].minSize(), 4 )
self.assertEqual( s["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s["n"]["a"] ), 4 )
self.assertTrue( s["n"]["a"][0].getInput() is None )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][3].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["a"].minSize(), 4 )
self.assertEqual( s2["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s2["n"]["a"] ), 4 )
self.assertTrue( s2["n"]["a"][0].getInput() is None )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][3].getInput() is None )
def tearDown( self ) :
# some bugs in the InputGenerator only showed themselves when
# the ScriptNode was deleted during garbage collection, often
# in totally unrelated tests. so we run the garbage collector
# here to localise any problems to this test, making them
# easier to diagnose and fix.
while gc.collect() :
pass
IECore.RefCounted.collectGarbage()
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -7,084,898,802,702,663,000 | 32.827044 | 156 | 0.572 | false |
mikhaelharswanto/ryu | ryu/tests/unit/packet/test_icmpv6.py | 7 | 69723 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
import inspect
from nose.tools import ok_, eq_, nottest, raises
from nose.plugins.skip import Skip, SkipTest
from ryu.ofproto import ether, inet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.packet import Packet
from ryu.lib.packet import icmpv6
from ryu.lib.packet.ipv6 import ipv6
from ryu.lib.packet import packet_utils
from ryu.lib import addrconv
LOG = logging.getLogger(__name__)
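# Reference checksum used by the tests: build the IPv6 pseudo-header
# (source, destination, upper-layer packet length, 3 zero bytes, next header)
# per RFC 2460/4443, zero the checksum field at offset 2 of the ICMPv6
# message, and checksum the concatenation.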
def icmpv6_csum(prev, buf):
ph = struct.pack('!16s16sI3xB',
addrconv.ipv6.text_to_bin(prev.src),
addrconv.ipv6.text_to_bin(prev.dst),
prev.payload_length, prev.nxt)
h = bytearray(buf)
struct.pack_into('!H', h, 2, 0)
return packet_utils.checksum(ph + h)
class Test_icmpv6_header(unittest.TestCase):
type_ = 255
code = 0
csum = 207
buf = '\xff\x00\x00\xcf'
icmp = icmpv6.icmpv6(type_, code, 0)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_, self.icmp.type_)
eq_(self.code, self.icmp.code)
eq_(0, self.icmp.csum)
def test_parser(self):
msg, n, _ = self.icmp.parser(self.buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data, None)
eq_(n, None)
def test_serialize(self):
src_ipv6 = 'fe80::200:ff:fe00:ef'
dst_ipv6 = 'fe80::200:ff:fe00:1'
prev = ipv6(6, 0, 0, 4, 58, 255, src_ipv6, dst_ipv6)
buf = self.icmp.serialize(bytearray(), prev)
(type_, code, csum) = struct.unpack(self.icmp._PACK_STR, buffer(buf))
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, self.csum)
@raises(Exception)
def test_malformed_icmpv6(self):
m_short_buf = self.buf[1:self.icmp._MIN_LEN]
self.icmp.parser(m_short_buf)
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6()
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
def test_json(self):
jsondict = self.icmp.to_jsondict()
icmp = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(self.icmp), str(icmp))
class Test_icmpv6_echo_request(unittest.TestCase):
type_ = 128
code = 0
csum = 0xa572
id_ = 0x7620
seq = 0
data = '\x01\xc9\xe7\x36\xd3\x39\x06\x00'
buf = '\x80\x00\xa5\x72\x76\x20\x00\x00'
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
echo = icmpv6.echo(0, 0)
eq_(echo.id, 0)
eq_(echo.seq, 0)
eq_(echo.data, None)
def _test_parser(self, data=None):
buf = self.buf + str(data or '')
msg, n, _ = icmpv6.icmpv6.parser(buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data.id, self.id_)
eq_(msg.data.seq, self.seq)
eq_(msg.data.data, data)
eq_(n, None)
def test_parser_without_data(self):
self._test_parser()
def test_parser_with_data(self):
self._test_parser(self.data)
def _test_serialize(self, echo_data=None):
buf = self.buf + str(echo_data or '')
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
prev = ipv6(6, 0, 0, len(buf), 64, 255, src_ipv6, dst_ipv6)
echo_csum = icmpv6_csum(prev, buf)
echo = icmpv6.echo(self.id_, self.seq, echo_data)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, echo)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(id_, seq) = struct.unpack_from(echo._PACK_STR, buf, icmp._MIN_LEN)
data = buf[(icmp._MIN_LEN + echo._MIN_LEN):]
data = data if len(data) != 0 else None
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, echo_csum)
eq_(id_, self.id_)
eq_(seq, self.seq)
eq_(data, echo_data)
def test_serialize_without_data(self):
self._test_serialize()
def test_serialize_with_data(self):
self._test_serialize(self.data)
def test_to_string(self):
ec = icmpv6.echo(self.id_, self.seq, self.data)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, ec)
echo_values = {'id': self.id_,
'seq': self.seq,
'data': self.data}
_echo_str = ','.join(['%s=%s' % (k, repr(echo_values[k]))
for k, v in inspect.getmembers(ec)
if k in echo_values])
echo_str = '%s(%s)' % (icmpv6.echo.__name__, _echo_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': echo_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ICMPV6_ECHO_REQUEST, data=icmpv6.echo())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ICMPV6_ECHO_REQUEST)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.echo._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
eq_(res[1], 0)
def test_json(self):
ec = icmpv6.echo(self.id_, self.seq, self.data)
ic1 = icmpv6.icmpv6(self.type_, self.code, self.csum, ec)
jsondict = ic1.to_jsondict()
ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(ic1), str(ic2))
class Test_icmpv6_echo_reply(Test_icmpv6_echo_request):
def setUp(self):
self.type_ = 129
self.csum = 0xa472
self.buf = '\x81\x00\xa4\x72\x76\x20\x00\x00'
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ICMPV6_ECHO_REPLY, data=icmpv6.echo())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ICMPV6_ECHO_REPLY)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.echo._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
eq_(res[1], 0)
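# The neighbour-discovery tests below build ND solicit/advert and router-solicit
# messages, with and without a link-layer address option, and check parsing as
# well as serialisation against hand-assembled byte strings.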
class Test_icmpv6_neighbor_solicit(unittest.TestCase):
type_ = 135
code = 0
csum = 0x952d
res = 0
dst = '3ffe:507:0:1:200:86ff:fe05:80da'
nd_type = 1
nd_length = 1
nd_hw_src = '00:60:97:07:69:ea'
data = '\x01\x01\x00\x60\x97\x07\x69\xea'
buf = '\x87\x00\x95\x2d\x00\x00\x00\x00' \
+ '\x3f\xfe\x05\x07\x00\x00\x00\x01' \
+ '\x02\x00\x86\xff\xfe\x05\x80\xda'
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
nd = icmpv6.nd_neighbor(self.res, self.dst)
eq_(nd.res, self.res)
eq_(nd.dst, self.dst)
eq_(nd.option, None)
def _test_parser(self, data=None):
buf = self.buf + str(data or '')
msg, n, _ = icmpv6.icmpv6.parser(buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data.res, self.res)
eq_(addrconv.ipv6.text_to_bin(msg.data.dst),
addrconv.ipv6.text_to_bin(self.dst))
eq_(n, None)
if data:
nd = msg.data.option
eq_(nd.length, self.nd_length)
eq_(nd.hw_src, self.nd_hw_src)
eq_(nd.data, None)
def test_parser_without_data(self):
self._test_parser()
def test_parser_with_data(self):
self._test_parser(self.data)
def test_serialize_without_data(self):
nd = icmpv6.nd_neighbor(self.res, self.dst)
prev = ipv6(6, 0, 0, 24, 64, 255, self.src_ipv6, self.dst_ipv6)
nd_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, nd)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(res, dst) = struct.unpack_from(nd._PACK_STR, buf, icmp._MIN_LEN)
data = buf[(icmp._MIN_LEN + nd._MIN_LEN):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, nd_csum)
eq_(res >> 29, self.res)
eq_(dst, addrconv.ipv6.text_to_bin(self.dst))
eq_(data, '')
def test_serialize_with_data(self):
nd_opt = icmpv6.nd_option_sla(self.nd_length, self.nd_hw_src)
nd = icmpv6.nd_neighbor(self.res, self.dst, nd_opt)
prev = ipv6(6, 0, 0, 32, 64, 255, self.src_ipv6, self.dst_ipv6)
nd_csum = icmpv6_csum(prev, self.buf + self.data)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, nd)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(res, dst) = struct.unpack_from(nd._PACK_STR, buf, icmp._MIN_LEN)
(nd_type, nd_length, nd_hw_src) = struct.unpack_from(
nd_opt._PACK_STR, buf, icmp._MIN_LEN + nd._MIN_LEN)
data = buf[(icmp._MIN_LEN + nd._MIN_LEN + 8):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, nd_csum)
eq_(res >> 29, self.res)
eq_(dst, addrconv.ipv6.text_to_bin(self.dst))
eq_(nd_type, self.nd_type)
eq_(nd_length, self.nd_length)
eq_(nd_hw_src, addrconv.mac.text_to_bin(self.nd_hw_src))
def test_to_string(self):
nd_opt = icmpv6.nd_option_sla(self.nd_length, self.nd_hw_src)
nd = icmpv6.nd_neighbor(self.res, self.dst, nd_opt)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, nd)
nd_opt_values = {'length': self.nd_length,
'hw_src': self.nd_hw_src,
'data': None}
_nd_opt_str = ','.join(['%s=%s' % (k, repr(nd_opt_values[k]))
for k, v in inspect.getmembers(nd_opt)
if k in nd_opt_values])
nd_opt_str = '%s(%s)' % (icmpv6.nd_option_sla.__name__, _nd_opt_str)
nd_values = {'res': repr(nd.res),
'dst': repr(self.dst),
'option': nd_opt_str}
_nd_str = ','.join(['%s=%s' % (k, nd_values[k])
for k, v in inspect.getmembers(nd)
if k in nd_values])
nd_str = '%s(%s)' % (icmpv6.nd_neighbor.__name__, _nd_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': nd_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_SOLICIT, data=icmpv6.nd_neighbor())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_NEIGHBOR_SOLICIT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_neighbor._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
# with nd_option_sla
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_SOLICIT,
data=icmpv6.nd_neighbor(
option=icmpv6.nd_option_sla()))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_NEIGHBOR_SOLICIT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_neighbor._PACK_STR, str(buf[4:24]))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, str(buf[24:]))
eq_(res[0], icmpv6.ND_OPTION_SLA)
eq_(res[1], len(icmpv6.nd_option_sla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
def test_json(self):
nd_opt = icmpv6.nd_option_sla(self.nd_length, self.nd_hw_src)
nd = icmpv6.nd_neighbor(self.res, self.dst, nd_opt)
ic1 = icmpv6.icmpv6(self.type_, self.code, self.csum, nd)
jsondict = ic1.to_jsondict()
ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(ic1), str(ic2))
class Test_icmpv6_neighbor_advert(Test_icmpv6_neighbor_solicit):
def setUp(self):
self.type_ = 136
self.csum = 0xb8ba
self.res = 7
self.dst = '3ffe:507:0:1:260:97ff:fe07:69ea'
self.nd_type = 2
self.nd_length = 1
self.nd_data = None
self.nd_hw_src = '00:60:97:07:69:ea'
self.data = '\x02\x01\x00\x60\x97\x07\x69\xea'
self.buf = '\x88\x00\xb8\xba\xe0\x00\x00\x00' \
+ '\x3f\xfe\x05\x07\x00\x00\x00\x01' \
+ '\x02\x60\x97\xff\xfe\x07\x69\xea'
def test_serialize_with_data(self):
nd_opt = icmpv6.nd_option_tla(self.nd_length, self.nd_hw_src)
nd = icmpv6.nd_neighbor(self.res, self.dst, nd_opt)
prev = ipv6(6, 0, 0, 32, 64, 255, self.src_ipv6, self.dst_ipv6)
nd_csum = icmpv6_csum(prev, self.buf + self.data)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, nd)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(res, dst) = struct.unpack_from(nd._PACK_STR, buf, icmp._MIN_LEN)
(nd_type, nd_length, nd_hw_src) = struct.unpack_from(
nd_opt._PACK_STR, buf, icmp._MIN_LEN + nd._MIN_LEN)
data = buf[(icmp._MIN_LEN + nd._MIN_LEN + 8):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, nd_csum)
eq_(res >> 29, self.res)
eq_(dst, addrconv.ipv6.text_to_bin(self.dst))
eq_(nd_type, self.nd_type)
eq_(nd_length, self.nd_length)
eq_(nd_hw_src, addrconv.mac.text_to_bin(self.nd_hw_src))
def test_to_string(self):
nd_opt = icmpv6.nd_option_tla(self.nd_length, self.nd_hw_src)
nd = icmpv6.nd_neighbor(self.res, self.dst, nd_opt)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, nd)
nd_opt_values = {'length': self.nd_length,
'hw_src': self.nd_hw_src,
'data': None}
_nd_opt_str = ','.join(['%s=%s' % (k, repr(nd_opt_values[k]))
for k, v in inspect.getmembers(nd_opt)
if k in nd_opt_values])
nd_opt_str = '%s(%s)' % (icmpv6.nd_option_tla.__name__, _nd_opt_str)
nd_values = {'res': repr(nd.res),
'dst': repr(self.dst),
'option': nd_opt_str}
_nd_str = ','.join(['%s=%s' % (k, nd_values[k])
for k, v in inspect.getmembers(nd)
if k in nd_values])
nd_str = '%s(%s)' % (icmpv6.nd_neighbor.__name__, _nd_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': nd_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_ADVERT, data=icmpv6.nd_neighbor())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_NEIGHBOR_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_neighbor._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
# with nd_option_tla
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_ADVERT,
data=icmpv6.nd_neighbor(
option=icmpv6.nd_option_tla()))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_NEIGHBOR_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_neighbor._PACK_STR, str(buf[4:24]))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
res = struct.unpack(icmpv6.nd_option_tla._PACK_STR, str(buf[24:]))
eq_(res[0], icmpv6.ND_OPTION_TLA)
eq_(res[1], len(icmpv6.nd_option_tla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
class Test_icmpv6_router_solicit(unittest.TestCase):
type_ = 133
code = 0
csum = 0x97d9
res = 0
nd_type = 1
nd_length = 1
nd_hw_src = '12:2d:a5:6d:bc:0f'
data = '\x00\x00\x00\x00\x01\x01\x12\x2d\xa5\x6d\xbc\x0f'
buf = '\x85\x00\x97\xd9'
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
rs = icmpv6.nd_router_solicit(self.res)
eq_(rs.res, self.res)
eq_(rs.option, None)
def _test_parser(self, data=None):
buf = self.buf + str(data or '')
msg, n, _ = icmpv6.icmpv6.parser(buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
if data is not None:
eq_(msg.data.res, self.res)
eq_(n, None)
if data:
rs = msg.data.option
eq_(rs.length, self.nd_length)
eq_(rs.hw_src, self.nd_hw_src)
eq_(rs.data, None)
def test_parser_without_data(self):
self._test_parser()
def test_parser_with_data(self):
self._test_parser(self.data)
def test_serialize_without_data(self):
rs = icmpv6.nd_router_solicit(self.res)
prev = ipv6(6, 0, 0, 8, 64, 255, self.src_ipv6, self.dst_ipv6)
rs_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, rs)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
res = struct.unpack_from(rs._PACK_STR, buf, icmp._MIN_LEN)
data = buf[(icmp._MIN_LEN + rs._MIN_LEN):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, rs_csum)
eq_(res[0], self.res)
eq_(data, '')
def test_serialize_with_data(self):
nd_opt = icmpv6.nd_option_sla(self.nd_length, self.nd_hw_src)
rs = icmpv6.nd_router_solicit(self.res, nd_opt)
prev = ipv6(6, 0, 0, 16, 64, 255, self.src_ipv6, self.dst_ipv6)
rs_csum = icmpv6_csum(prev, self.buf + self.data)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, rs)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
res = struct.unpack_from(rs._PACK_STR, buf, icmp._MIN_LEN)
(nd_type, nd_length, nd_hw_src) = struct.unpack_from(
nd_opt._PACK_STR, buf, icmp._MIN_LEN + rs._MIN_LEN)
data = buf[(icmp._MIN_LEN + rs._MIN_LEN + 8):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, rs_csum)
eq_(res[0], self.res)
eq_(nd_type, self.nd_type)
eq_(nd_length, self.nd_length)
eq_(nd_hw_src, addrconv.mac.text_to_bin(self.nd_hw_src))
def test_to_string(self):
nd_opt = icmpv6.nd_option_sla(self.nd_length, self.nd_hw_src)
rs = icmpv6.nd_router_solicit(self.res, nd_opt)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, rs)
nd_opt_values = {'length': self.nd_length,
'hw_src': self.nd_hw_src,
'data': None}
_nd_opt_str = ','.join(['%s=%s' % (k, repr(nd_opt_values[k]))
for k, v in inspect.getmembers(nd_opt)
if k in nd_opt_values])
nd_opt_str = '%s(%s)' % (icmpv6.nd_option_sla.__name__, _nd_opt_str)
rs_values = {'res': repr(rs.res),
'option': nd_opt_str}
_rs_str = ','.join(['%s=%s' % (k, rs_values[k])
for k, v in inspect.getmembers(rs)
if k in rs_values])
rs_str = '%s(%s)' % (icmpv6.nd_router_solicit.__name__, _rs_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': rs_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_SOLICIT, data=icmpv6.nd_router_solicit())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_SOLICIT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_solicit._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
# with nd_option_sla
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_SOLICIT,
data=icmpv6.nd_router_solicit(
option=icmpv6.nd_option_sla()))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_SOLICIT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_solicit._PACK_STR, str(buf[4:8]))
eq_(res[0], 0)
res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, str(buf[8:]))
eq_(res[0], icmpv6.ND_OPTION_SLA)
eq_(res[1], len(icmpv6.nd_option_sla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
def test_json(self):
nd_opt = icmpv6.nd_option_sla(self.nd_length, self.nd_hw_src)
rs = icmpv6.nd_router_solicit(self.res, nd_opt)
ic1 = icmpv6.icmpv6(self.type_, self.code, self.csum, rs)
jsondict = ic1.to_jsondict()
ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(ic1), str(ic2))
class Test_icmpv6_router_advert(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_ADVERT, data=icmpv6.nd_router_advert())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
# with nd_option_sla
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_ADVERT,
data=icmpv6.nd_router_advert(
options=[icmpv6.nd_option_sla()]))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, str(buf[4:16]))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, str(buf[16:]))
eq_(res[0], icmpv6.ND_OPTION_SLA)
eq_(res[1], len(icmpv6.nd_option_sla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
# with nd_option_pi
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_ADVERT,
data=icmpv6.nd_router_advert(
options=[icmpv6.nd_option_pi()]))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, str(buf[4:16]))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
res = struct.unpack(icmpv6.nd_option_pi._PACK_STR, str(buf[16:]))
eq_(res[0], icmpv6.ND_OPTION_PI)
eq_(res[1], 4)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
eq_(res[5], 0)
eq_(res[6], 0)
eq_(res[7], addrconv.ipv6.text_to_bin('::'))
# with nd_option_sla and nd_option_pi
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_ADVERT,
data=icmpv6.nd_router_advert(
options=[icmpv6.nd_option_sla(), icmpv6.nd_option_pi()]))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, str(buf[4:16]))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, str(buf[16:24]))
eq_(res[0], icmpv6.ND_OPTION_SLA)
eq_(res[1], len(icmpv6.nd_option_sla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
res = struct.unpack(icmpv6.nd_option_pi._PACK_STR, str(buf[24:]))
eq_(res[0], icmpv6.ND_OPTION_PI)
eq_(res[1], len(icmpv6.nd_option_pi()) / 8)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
eq_(res[5], 0)
eq_(res[6], 0)
eq_(res[7], addrconv.ipv6.text_to_bin('::'))
def test_json(self):
ic1 = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_ADVERT,
data=icmpv6.nd_router_advert(
options=[icmpv6.nd_option_sla(), icmpv6.nd_option_pi()]))
jsondict = ic1.to_jsondict()
ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(ic1), str(ic2))
class Test_icmpv6_nd_option_la(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_default_args(self):
la = icmpv6.nd_option_sla()
buf = la.serialize()
res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, str(buf))
eq_(res[0], icmpv6.ND_OPTION_SLA)
eq_(res[1], len(icmpv6.nd_option_sla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
# with nd_neighbor
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_ADVERT,
data=icmpv6.nd_neighbor(
option=icmpv6.nd_option_tla()))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_NEIGHBOR_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_neighbor._PACK_STR, str(buf[4:24]))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
res = struct.unpack(icmpv6.nd_option_tla._PACK_STR, str(buf[24:]))
eq_(res[0], icmpv6.ND_OPTION_TLA)
eq_(res[1], len(icmpv6.nd_option_tla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
# with nd_router_solicit
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_SOLICIT,
data=icmpv6.nd_router_solicit(
option=icmpv6.nd_option_sla()))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_SOLICIT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_solicit._PACK_STR, str(buf[4:8]))
eq_(res[0], 0)
res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, str(buf[8:]))
eq_(res[0], icmpv6.ND_OPTION_SLA)
eq_(res[1], len(icmpv6.nd_option_sla()) / 8)
eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
class Test_icmpv6_nd_option_pi(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_default_args(self):
pi = icmpv6.nd_option_pi()
buf = pi.serialize()
res = struct.unpack(icmpv6.nd_option_pi._PACK_STR, str(buf))
eq_(res[0], icmpv6.ND_OPTION_PI)
eq_(res[1], len(icmpv6.nd_option_pi()) / 8)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
eq_(res[5], 0)
eq_(res[6], 0)
eq_(res[7], addrconv.ipv6.text_to_bin('::'))
# with nd_router_advert
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_ADVERT,
data=icmpv6.nd_router_advert(
options=[icmpv6.nd_option_pi()]))
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, str(buf[4:16]))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
res = struct.unpack(icmpv6.nd_option_pi._PACK_STR, str(buf[16:]))
eq_(res[0], icmpv6.ND_OPTION_PI)
eq_(res[1], 4)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(res[4], 0)
eq_(res[5], 0)
eq_(res[6], 0)
eq_(res[7], addrconv.ipv6.text_to_bin('::'))
class Test_icmpv6_membership_query(unittest.TestCase):
type_ = 130
code = 0
csum = 0xb5a4
maxresp = 10000
address = 'ff08::1'
buf = '\x82\x00\xb5\xa4\x27\x10\x00\x00' \
+ '\xff\x08\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01'
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
mld = icmpv6.mld(self.maxresp, self.address)
eq_(mld.maxresp, self.maxresp)
eq_(mld.address, self.address)
def test_parser(self):
msg, n, _ = icmpv6.icmpv6.parser(self.buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data.maxresp, self.maxresp)
eq_(msg.data.address, self.address)
eq_(n, None)
def test_serialize(self):
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
prev = ipv6(6, 0, 0, len(self.buf), 64, 255, src_ipv6, dst_ipv6)
mld_csum = icmpv6_csum(prev, self.buf)
mld = icmpv6.mld(self.maxresp, self.address)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, mld)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(maxresp, address) = struct.unpack_from(
mld._PACK_STR, buf, icmp._MIN_LEN)
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, mld_csum)
eq_(maxresp, self.maxresp)
eq_(address, addrconv.ipv6.text_to_bin(self.address))
def test_to_string(self):
ml = icmpv6.mld(self.maxresp, self.address)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, ml)
mld_values = {'maxresp': self.maxresp,
'address': self.address}
_mld_str = ','.join(['%s=%s' % (k, repr(mld_values[k]))
for k, v in inspect.getmembers(ml)
if k in mld_values])
mld_str = '%s(%s)' % (icmpv6.mld.__name__, _mld_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': mld_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.MLD_LISTENER_QUERY, data=icmpv6.mld())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.MLD_LISTENER_QUERY)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.mld._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
def test_json(self):
ic1 = icmpv6.icmpv6(
type_=icmpv6.MLD_LISTENER_QUERY,
data=icmpv6.mld())
jsondict = ic1.to_jsondict()
ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(ic1), str(ic2))
class Test_icmpv6_membership_report(Test_icmpv6_membership_query):
type_ = 131
code = 0
csum = 0xb4a4
maxresp = 10000
address = 'ff08::1'
buf = '\x83\x00\xb4\xa4\x27\x10\x00\x00' \
+ '\xff\x08\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01'
def test_json(self):
ic1 = icmpv6.icmpv6(
type_=icmpv6.MLD_LISTENER_REPOR,
data=icmpv6.mld())
jsondict = ic1.to_jsondict()
ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(ic1), str(ic2))
class Test_icmpv6_membership_done(Test_icmpv6_membership_query):
type_ = 132
code = 0
csum = 0xb3a4
maxresp = 10000
address = 'ff08::1'
buf = '\x84\x00\xb3\xa4\x27\x10\x00\x00' \
+ '\xff\x08\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01'
def test_json(self):
ic1 = icmpv6.icmpv6(
type_=icmpv6.MLD_LISTENER_DONE,
data=icmpv6.mld())
jsondict = ic1.to_jsondict()
ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
eq_(str(ic1), str(ic2))
class Test_mldv2_query(unittest.TestCase):
type_ = 130
code = 0
csum = 0xb5a4
maxresp = 10000
address = 'ff08::1'
s_flg = 0
qrv = 2
s_qrv = s_flg << 3 | qrv
qqic = 10
num = 0
srcs = []
mld = icmpv6.mldv2_query(
maxresp, address, s_flg, qrv, qqic, num, srcs)
buf = '\x82\x00\xb5\xa4\x27\x10\x00\x00' \
+ '\xff\x08\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\x02\x0a\x00\x00'
def setUp(self):
pass
def setUp_with_srcs(self):
self.num = 2
self.srcs = ['ff80::1', 'ff80::2']
self.mld = icmpv6.mldv2_query(
self.maxresp, self.address, self.s_flg, self.qrv, self.qqic,
self.num, self.srcs)
self.buf = '\x82\x00\xb5\xa4\x27\x10\x00\x00' \
+ '\xff\x08\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\x02\x0a\x00\x02' \
+ '\xff\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\xff\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x02'
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.mld.maxresp, self.maxresp)
eq_(self.mld.address, self.address)
eq_(self.mld.s_flg, self.s_flg)
eq_(self.mld.qrv, self.qrv)
eq_(self.mld.qqic, self.qqic)
eq_(self.mld.num, self.num)
eq_(self.mld.srcs, self.srcs)
def test_init_with_srcs(self):
self.setUp_with_srcs()
self.test_init()
def test_parser(self):
msg, n, _ = icmpv6.icmpv6.parser(self.buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data.maxresp, self.maxresp)
eq_(msg.data.address, self.address)
eq_(msg.data.s_flg, self.s_flg)
eq_(msg.data.qrv, self.qrv)
eq_(msg.data.qqic, self.qqic)
eq_(msg.data.num, self.num)
eq_(msg.data.srcs, self.srcs)
eq_(n, None)
def test_parser_with_srcs(self):
self.setUp_with_srcs()
self.test_parser()
def test_serialize(self):
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
prev = ipv6(6, 0, 0, len(self.buf), 64, 255, src_ipv6, dst_ipv6)
mld_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, self.mld)
buf = icmp.serialize(bytearray(), prev)
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, str(buf))
(maxresp, address, s_qrv, qqic, num) = struct.unpack_from(
self.mld._PACK_STR, str(buf), icmp._MIN_LEN)
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, mld_csum)
eq_(maxresp, self.maxresp)
eq_(address, addrconv.ipv6.text_to_bin(self.address))
s_flg = (s_qrv >> 3) & 0b1
qrv = s_qrv & 0b111
eq_(s_flg, self.s_flg)
eq_(qrv, self.qrv)
eq_(qqic, self.qqic)
eq_(num, self.num)
def test_serialize_with_srcs(self):
self.setUp_with_srcs()
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
prev = ipv6(6, 0, 0, len(self.buf), 64, 255, src_ipv6, dst_ipv6)
mld_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, self.mld)
buf = icmp.serialize(bytearray(), prev)
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, str(buf))
(maxresp, address, s_qrv, qqic, num) = struct.unpack_from(
self.mld._PACK_STR, str(buf), icmp._MIN_LEN)
(addr1, addr2) = struct.unpack_from(
'!16s16s', str(buf), icmp._MIN_LEN + self.mld._MIN_LEN)
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, mld_csum)
eq_(maxresp, self.maxresp)
eq_(address, addrconv.ipv6.text_to_bin(self.address))
s_flg = (s_qrv >> 3) & 0b1
qrv = s_qrv & 0b111
eq_(s_flg, self.s_flg)
eq_(qrv, self.qrv)
eq_(qqic, self.qqic)
eq_(num, self.num)
eq_(addr1, addrconv.ipv6.text_to_bin(self.srcs[0]))
eq_(addr2, addrconv.ipv6.text_to_bin(self.srcs[1]))
def _build_mldv2_query(self):
e = ethernet(ethertype=ether.ETH_TYPE_IPV6)
i = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(type_=icmpv6.MLD_LISTENER_QUERY,
data=self.mld)
p = e/i/ic
return p
def test_build_mldv2_query(self):
p = self._build_mldv2_query()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_IPV6)
i = self.find_protocol(p, "ipv6")
ok_(i)
eq_(i.nxt, inet.IPPROTO_ICMPV6)
ic = self.find_protocol(p, "icmpv6")
ok_(ic)
eq_(ic.type_, icmpv6.MLD_LISTENER_QUERY)
eq_(ic.data.maxresp, self.maxresp)
eq_(ic.data.address, self.address)
eq_(ic.data.s_flg, self.s_flg)
eq_(ic.data.qrv, self.qrv)
eq_(ic.data.num, self.num)
eq_(ic.data.srcs, self.srcs)
def test_build_mldv2_query_with_srcs(self):
self.setUp_with_srcs()
self.test_build_mldv2_query()
def test_to_string(self):
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, self.mld)
mld_values = {'maxresp': self.maxresp,
'address': self.address,
's_flg': self.s_flg,
'qrv': self.qrv,
'qqic': self.qqic,
'num': self.num,
'srcs': self.srcs}
_mld_str = ','.join(['%s=%s' % (k, repr(mld_values[k]))
for k, v in inspect.getmembers(self.mld)
if k in mld_values])
mld_str = '%s(%s)' % (icmpv6.mldv2_query.__name__, _mld_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': mld_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
def test_to_string_with_srcs(self):
self.setUp_with_srcs()
self.test_to_string()
@raises(Exception)
def test_num_larger_than_srcs(self):
self.srcs = ['ff80::1', 'ff80::2', 'ff80::3']
self.num = len(self.srcs) + 1
self.buf = pack(icmpv6.mldv2_query._PACK_STR, self.maxresp,
addrconv.ipv6.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
for src in self.srcs:
self.buf += pack('16s', addrconv.ipv6.text_to_bin(src))
self.mld = icmpv6.mldv2_query(
self.maxresp, self.address, self.s_flg, self.qrv, self.qqic,
self.num, self.srcs)
self.test_parser()
@raises(Exception)
def test_num_smaller_than_srcs(self):
self.srcs = ['ff80::1', 'ff80::2', 'ff80::3']
self.num = len(self.srcs) - 1
self.buf = pack(icmpv6.mldv2_query._PACK_STR, self.maxresp,
addrconv.ipv6.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
for src in self.srcs:
self.buf += pack('16s', addrconv.ipv6.text_to_bin(src))
self.mld = icmpv6.mldv2_query(
self.maxresp, self.address, self.s_flg, self.qrv, self.qqic,
self.num, self.srcs)
self.test_parser()
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.MLD_LISTENER_QUERY, data=icmpv6.mldv2_query())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.MLD_LISTENER_QUERY)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.mldv2_query._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
eq_(res[2], 2)
eq_(res[3], 0)
eq_(res[4], 0)
# srcs without num
srcs = ['ff80::1', 'ff80::2', 'ff80::3']
que = icmpv6.mldv2_query(srcs=srcs)
buf = que.serialize()
res = struct.unpack_from(
icmpv6.mldv2_query._PACK_STR, str(buf))
eq_(res[0], 0)
eq_(res[1], addrconv.ipv6.text_to_bin('::'))
eq_(res[2], 2)
eq_(res[3], 0)
eq_(res[4], len(srcs))
(src1, src2, src3) = struct.unpack_from(
'16s16s16s', str(buf), icmpv6.mldv2_query._MIN_LEN)
eq_(src1, addrconv.ipv6.text_to_bin(srcs[0]))
eq_(src2, addrconv.ipv6.text_to_bin(srcs[1]))
eq_(src3, addrconv.ipv6.text_to_bin(srcs[2]))
def test_json(self):
jsondict = self.mld.to_jsondict()
mld = icmpv6.mldv2_query.from_jsondict(jsondict['mldv2_query'])
eq_(str(self.mld), str(mld))
def test_json_with_srcs(self):
self.setUp_with_srcs()
self.test_json()
class Test_mldv2_report(unittest.TestCase):
type_ = 143
code = 0
csum = 0xb5a4
record_num = 0
records = []
mld = icmpv6.mldv2_report(record_num, records)
buf = '\x8f\x00\xb5\xa4\x00\x00\x00\x00'
def setUp(self):
pass
def setUp_with_records(self):
self.record1 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 0, 'ff00::1')
self.record2 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 2, 'ff00::2',
['fe80::1', 'fe80::2'])
self.record3 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 1, 0, 'ff00::3', [], 'abc\x00')
self.record4 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 2, 2, 'ff00::4',
['fe80::1', 'fe80::2'], 'abcde\x00\x00\x00')
self.records = [self.record1, self.record2, self.record3,
self.record4]
self.record_num = len(self.records)
self.mld = icmpv6.mldv2_report(self.record_num, self.records)
self.buf = '\x8f\x00\xb5\xa4\x00\x00\x00\x04' \
+ '\x01\x00\x00\x00' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\x01\x00\x00\x02' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x02' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x02' \
+ '\x01\x01\x00\x00' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x03' \
+ '\x61\x62\x63\x00' \
+ '\x01\x02\x00\x02' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x04' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x02' \
+ '\x61\x62\x63\x64\x65\x00\x00\x00'
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.mld.record_num, self.record_num)
eq_(self.mld.records, self.records)
def test_init_with_records(self):
self.setUp_with_records()
self.test_init()
def test_parser(self):
msg, n, _ = icmpv6.icmpv6.parser(self.buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data.record_num, self.record_num)
eq_(repr(msg.data.records), repr(self.records))
def test_parser_with_records(self):
self.setUp_with_records()
self.test_parser()
def test_serialize(self):
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
prev = ipv6(6, 0, 0, len(self.buf), 64, 255, src_ipv6, dst_ipv6)
mld_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, self.mld)
buf = icmp.serialize(bytearray(), prev)
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, str(buf))
(record_num, ) = struct.unpack_from(
self.mld._PACK_STR, str(buf), icmp._MIN_LEN)
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, mld_csum)
eq_(record_num, self.record_num)
def test_serialize_with_records(self):
self.setUp_with_records()
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
prev = ipv6(6, 0, 0, len(self.buf), 64, 255, src_ipv6, dst_ipv6)
mld_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, self.mld)
buf = icmp.serialize(bytearray(), prev)
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, str(buf))
(record_num, ) = struct.unpack_from(
self.mld._PACK_STR, str(buf), icmp._MIN_LEN)
offset = icmp._MIN_LEN + self.mld._MIN_LEN
rec1 = icmpv6.mldv2_report_group.parser(buffer(buf[offset:]))
offset += len(rec1)
rec2 = icmpv6.mldv2_report_group.parser(buffer(buf[offset:]))
offset += len(rec2)
rec3 = icmpv6.mldv2_report_group.parser(buffer(buf[offset:]))
offset += len(rec3)
rec4 = icmpv6.mldv2_report_group.parser(buffer(buf[offset:]))
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, mld_csum)
eq_(record_num, self.record_num)
eq_(repr(rec1), repr(self.record1))
eq_(repr(rec2), repr(self.record2))
eq_(repr(rec3), repr(self.record3))
eq_(repr(rec4), repr(self.record4))
def _build_mldv2_report(self):
e = ethernet(ethertype=ether.ETH_TYPE_IPV6)
i = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(type_=icmpv6.MLDV2_LISTENER_REPORT,
data=self.mld)
p = e/i/ic
return p
def test_build_mldv2_report(self):
p = self._build_mldv2_report()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_IPV6)
i = self.find_protocol(p, "ipv6")
ok_(i)
eq_(i.nxt, inet.IPPROTO_ICMPV6)
ic = self.find_protocol(p, "icmpv6")
ok_(ic)
eq_(ic.type_, icmpv6.MLDV2_LISTENER_REPORT)
eq_(ic.data.record_num, self.record_num)
eq_(ic.data.records, self.records)
def test_build_mldv2_report_with_records(self):
self.setUp_with_records()
self.test_build_mldv2_report()
def test_to_string(self):
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, self.mld)
mld_values = {'record_num': self.record_num,
'records': self.records}
_mld_str = ','.join(['%s=%s' % (k, repr(mld_values[k]))
for k, v in inspect.getmembers(self.mld)
if k in mld_values])
mld_str = '%s(%s)' % (icmpv6.mldv2_report.__name__, _mld_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': mld_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
def test_to_string_with_records(self):
self.setUp_with_records()
self.test_to_string()
@raises(Exception)
def test_record_num_larger_than_records(self):
self.record1 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 0, 'ff00::1')
self.record2 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 2, 'ff00::2',
['fe80::1', 'fe80::2'])
self.record3 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 1, 0, 'ff00::3', [], 'abc\x00')
self.record4 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 2, 2, 'ff00::4',
['fe80::1', 'fe80::2'], 'abcde\x00\x00\x00')
self.records = [self.record1, self.record2, self.record3,
self.record4]
self.record_num = len(self.records) + 1
self.buf = struct.pack(
icmpv6.mldv2_report._PACK_STR, self.record_num)
self.buf += self.record1.serialize()
self.buf += self.record2.serialize()
self.buf += self.record3.serialize()
self.buf += self.record4.serialize()
self.mld = icmpv6.mldv2_report(self.record_num, self.records)
self.test_parser()
@raises(Exception)
def test_record_num_smaller_than_records(self):
self.record1 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 0, 'ff00::1')
self.record2 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 2, 'ff00::2',
['fe80::1', 'fe80::2'])
self.record3 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 1, 0, 'ff00::3', [], 'abc\x00')
self.record4 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 2, 2, 'ff00::4',
['fe80::1', 'fe80::2'], 'abcde\x00\x00\x00')
self.records = [self.record1, self.record2, self.record3,
self.record4]
self.record_num = len(self.records) - 1
self.buf = struct.pack(
icmpv6.mldv2_report._PACK_STR, self.record_num)
self.buf += self.record1.serialize()
self.buf += self.record2.serialize()
self.buf += self.record3.serialize()
self.buf += self.record4.serialize()
self.mld = icmpv6.mldv2_report(self.record_num, self.records)
self.test_parser()
def test_default_args(self):
prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6(
type_=icmpv6.MLDV2_LISTENER_REPORT, data=icmpv6.mldv2_report())
prev.serialize(ic, None)
buf = ic.serialize(bytearray(), prev)
res = struct.unpack(icmpv6.icmpv6._PACK_STR, str(buf[:4]))
eq_(res[0], icmpv6.MLDV2_LISTENER_REPORT)
eq_(res[1], 0)
eq_(res[2], icmpv6_csum(prev, buf))
res = struct.unpack(icmpv6.mldv2_report._PACK_STR, str(buf[4:]))
eq_(res[0], 0)
# records without record_num
record1 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 0, 'ff00::1')
record2 = icmpv6.mldv2_report_group(
icmpv6.MODE_IS_INCLUDE, 0, 2, 'ff00::2',
['fe80::1', 'fe80::2'])
records = [record1, record2]
rep = icmpv6.mldv2_report(records=records)
buf = rep.serialize()
res = struct.unpack_from(
icmpv6.mldv2_report._PACK_STR, str(buf))
eq_(res[0], len(records))
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, str(buf),
icmpv6.mldv2_report._MIN_LEN)
eq_(res[0], icmpv6.MODE_IS_INCLUDE)
eq_(res[1], 0)
eq_(res[2], 0)
eq_(res[3], addrconv.ipv6.text_to_bin('ff00::1'))
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, str(buf),
icmpv6.mldv2_report._MIN_LEN +
icmpv6.mldv2_report_group._MIN_LEN)
eq_(res[0], icmpv6.MODE_IS_INCLUDE)
eq_(res[1], 0)
eq_(res[2], 2)
eq_(res[3], addrconv.ipv6.text_to_bin('ff00::2'))
res = struct.unpack_from(
'16s16s', str(buf),
icmpv6.mldv2_report._MIN_LEN +
icmpv6.mldv2_report_group._MIN_LEN +
icmpv6.mldv2_report_group._MIN_LEN)
eq_(res[0], addrconv.ipv6.text_to_bin('fe80::1'))
eq_(res[1], addrconv.ipv6.text_to_bin('fe80::2'))
def test_json(self):
jsondict = self.mld.to_jsondict()
mld = icmpv6.mldv2_report.from_jsondict(jsondict['mldv2_report'])
eq_(str(self.mld), str(mld))
def test_json_with_records(self):
self.setUp_with_records()
self.test_json()
class Test_mldv2_report_group(unittest.TestCase):
type_ = icmpv6.MODE_IS_INCLUDE
aux_len = 0
num = 0
address = 'ff00::1'
srcs = []
aux = None
mld = icmpv6.mldv2_report_group(
type_, aux_len, num, address, srcs, aux)
buf = '\x01\x00\x00\x00' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01'
def setUp(self):
pass
def setUp_with_srcs(self):
self.srcs = ['fe80::1', 'fe80::2', 'fe80::3']
self.num = len(self.srcs)
self.mld = icmpv6.mldv2_report_group(
self.type_, self.aux_len, self.num, self.address, self.srcs,
self.aux)
self.buf = '\x01\x00\x00\x03' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x02' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x03'
def setUp_with_aux(self):
self.aux = '\x01\x02\x03\x04\x05\x06\x07\x08'
self.aux_len = len(self.aux) / 4
self.mld = icmpv6.mldv2_report_group(
self.type_, self.aux_len, self.num, self.address, self.srcs,
self.aux)
self.buf = '\x01\x02\x00\x00' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\x01\x02\x03\x04\x05\x06\x07\x08'
def setUp_with_srcs_and_aux(self):
self.srcs = ['fe80::1', 'fe80::2', 'fe80::3']
self.num = len(self.srcs)
self.aux = '\x01\x02\x03\x04\x05\x06\x07\x08'
self.aux_len = len(self.aux) / 4
self.mld = icmpv6.mldv2_report_group(
self.type_, self.aux_len, self.num, self.address, self.srcs,
self.aux)
self.buf = '\x01\x02\x00\x03' \
+ '\xff\x00\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x01' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x02' \
+ '\xfe\x80\x00\x00\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00\x00\x00\x00\x03' \
+ '\x01\x02\x03\x04\x05\x06\x07\x08'
def tearDown(self):
pass
def test_init(self):
eq_(self.mld.type_, self.type_)
eq_(self.mld.aux_len, self.aux_len)
eq_(self.mld.num, self.num)
eq_(self.mld.address, self.address)
eq_(self.mld.srcs, self.srcs)
eq_(self.mld.aux, self.aux)
def test_init_with_srcs(self):
self.setUp_with_srcs()
self.test_init()
def test_init_with_aux(self):
self.setUp_with_aux()
self.test_init()
def test_init_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
self.test_init()
def test_parser(self):
_res = icmpv6.mldv2_report_group.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.type_, self.type_)
eq_(res.aux_len, self.aux_len)
eq_(res.num, self.num)
eq_(res.address, self.address)
eq_(res.srcs, self.srcs)
eq_(res.aux, self.aux)
def test_parser_with_srcs(self):
self.setUp_with_srcs()
self.test_parser()
def test_parser_with_aux(self):
self.setUp_with_aux()
self.test_parser()
def test_parser_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
self.test_parser()
def test_serialize(self):
buf = self.mld.serialize()
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, buffer(buf))
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv6.text_to_bin(self.address))
def test_serialize_with_srcs(self):
self.setUp_with_srcs()
buf = self.mld.serialize()
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, buffer(buf))
(src1, src2, src3) = struct.unpack_from(
'16s16s16s', buffer(buf), icmpv6.mldv2_report_group._MIN_LEN)
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv6.text_to_bin(self.address))
eq_(src1, addrconv.ipv6.text_to_bin(self.srcs[0]))
eq_(src2, addrconv.ipv6.text_to_bin(self.srcs[1]))
eq_(src3, addrconv.ipv6.text_to_bin(self.srcs[2]))
def test_serialize_with_aux(self):
self.setUp_with_aux()
buf = self.mld.serialize()
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, buffer(buf))
(aux, ) = struct.unpack_from(
'%ds' % (self.aux_len * 4), buffer(buf),
icmpv6.mldv2_report_group._MIN_LEN)
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv6.text_to_bin(self.address))
eq_(aux, self.aux)
def test_serialize_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
buf = self.mld.serialize()
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, buffer(buf))
(src1, src2, src3) = struct.unpack_from(
'16s16s16s', buffer(buf), icmpv6.mldv2_report_group._MIN_LEN)
(aux, ) = struct.unpack_from(
'%ds' % (self.aux_len * 4), buffer(buf),
icmpv6.mldv2_report_group._MIN_LEN + 16 * 3)
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv6.text_to_bin(self.address))
eq_(src1, addrconv.ipv6.text_to_bin(self.srcs[0]))
eq_(src2, addrconv.ipv6.text_to_bin(self.srcs[1]))
eq_(src3, addrconv.ipv6.text_to_bin(self.srcs[2]))
eq_(aux, self.aux)
def test_to_string(self):
igmp_values = {'type_': repr(self.type_),
'aux_len': repr(self.aux_len),
'num': repr(self.num),
'address': repr(self.address),
'srcs': repr(self.srcs),
'aux': repr(self.aux)}
_g_str = ','.join(['%s=%s' % (k, igmp_values[k])
for k, v in inspect.getmembers(self.mld)
if k in igmp_values])
g_str = '%s(%s)' % (icmpv6.mldv2_report_group.__name__, _g_str)
eq_(str(self.mld), g_str)
eq_(repr(self.mld), g_str)
def test_to_string_with_srcs(self):
self.setUp_with_srcs()
self.test_to_string()
def test_to_string_with_aux(self):
self.setUp_with_aux()
self.test_to_string()
def test_to_string_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
self.test_to_string()
def test_len(self):
eq_(len(self.mld), 20)
def test_len_with_srcs(self):
self.setUp_with_srcs()
eq_(len(self.mld), 68)
def test_len_with_aux(self):
self.setUp_with_aux()
eq_(len(self.mld), 28)
def test_len_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
eq_(len(self.mld), 76)
    @raises(Exception)
def test_num_larger_than_srcs(self):
self.srcs = ['fe80::1', 'fe80::2', 'fe80::3']
self.num = len(self.srcs) + 1
self.buf = struct.pack(
icmpv6.mldv2_report_group._PACK_STR, self.type_, self.aux_len,
self.num, addrconv.ipv6.text_to_bin(self.address))
for src in self.srcs:
self.buf += pack('16s', addrconv.ipv6.text_to_bin(src))
self.mld = icmpv6.mldv2_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
self.test_parser()
    @raises(Exception)
def test_num_smaller_than_srcs(self):
self.srcs = ['fe80::1', 'fe80::2', 'fe80::3']
self.num = len(self.srcs) - 1
self.buf = struct.pack(
icmpv6.mldv2_report_group._PACK_STR, self.type_, self.aux_len,
self.num, addrconv.ipv6.text_to_bin(self.address))
for src in self.srcs:
self.buf += pack('16s', addrconv.ipv6.text_to_bin(src))
self.mld = icmpv6.mldv2_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
self.test_parser()
    @raises(Exception)
def test_aux_len_larger_than_aux(self):
self.aux = '\x01\x02\x03\x04\x05\x06\x07\x08'
self.aux_len = len(self.aux) / 4 + 1
self.buf = struct.pack(
icmpv6.mldv2_report_group._PACK_STR, self.type_, self.aux_len,
self.num, addrconv.ipv6.text_to_bin(self.address))
self.buf += self.aux
self.mld = icmpv6.mldv2_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
self.test_parser()
    @raises(Exception)
def test_aux_len_smaller_than_aux(self):
self.aux = '\x01\x02\x03\x04\x05\x06\x07\x08'
self.aux_len = len(self.aux) / 4 - 1
self.buf = struct.pack(
icmpv6.mldv2_report_group._PACK_STR, self.type_, self.aux_len,
self.num, addrconv.ipv6.text_to_bin(self.address))
self.buf += self.aux
self.mld = icmpv6.mldv2_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
self.test_parser()
def test_default_args(self):
rep = icmpv6.mldv2_report_group()
buf = rep.serialize()
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, str(buf))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], 0)
eq_(res[3], addrconv.ipv6.text_to_bin('::'))
# srcs without num
srcs = ['fe80::1', 'fe80::2', 'fe80::3']
rep = icmpv6.mldv2_report_group(srcs=srcs)
buf = rep.serialize()
LOG.info(repr(buf))
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, str(buf))
eq_(res[0], 0)
eq_(res[1], 0)
eq_(res[2], len(srcs))
eq_(res[3], addrconv.ipv6.text_to_bin('::'))
(src1, src2, src3) = struct.unpack_from(
'16s16s16s', str(buf), icmpv6.mldv2_report_group._MIN_LEN)
eq_(src1, addrconv.ipv6.text_to_bin(srcs[0]))
eq_(src2, addrconv.ipv6.text_to_bin(srcs[1]))
eq_(src3, addrconv.ipv6.text_to_bin(srcs[2]))
# aux without aux_len
rep = icmpv6.mldv2_report_group(aux='\x01\x02\x03')
buf = rep.serialize()
res = struct.unpack_from(
icmpv6.mldv2_report_group._PACK_STR, str(buf))
eq_(res[0], 0)
eq_(res[1], 1)
eq_(res[2], 0)
eq_(res[3], addrconv.ipv6.text_to_bin('::'))
eq_(buf[icmpv6.mldv2_report_group._MIN_LEN:], '\x01\x02\x03\x00')
def test_json(self):
jsondict = self.mld.to_jsondict()
mld = icmpv6.mldv2_report_group.from_jsondict(
jsondict['mldv2_report_group'])
eq_(str(self.mld), str(mld))
def test_json_with_srcs(self):
self.setUp_with_srcs()
self.test_json()
def test_json_with_aux(self):
self.setUp_with_aux()
self.test_json()
def test_json_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
self.test_json()
| apache-2.0 | -601,829,008,736,663,800 | 33.619166 | 78 | 0.537039 | false |
batisteo/django-leaflet | leaflet/__init__.py | 1 | 7454 | # -*- coding: utf8 -*-
from __future__ import unicode_literals
try:
from urllib.parse import urlparse, urljoin
except ImportError:
from urlparse import urlparse, urljoin
import warnings
try:
from collections import OrderedDict
except ImportError:
# python 2.6 compatibility (need to install ordereddict package).
from ordereddict import OrderedDict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.utils import six
DEFAULT_TILES = [(_('OSM'), 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors')]
LEAFLET_CONFIG = getattr(settings, 'LEAFLET_CONFIG', {})
app_settings = dict({
'TILES': DEFAULT_TILES,
'ATTRIBUTION_PREFIX': None,
'SPATIAL_EXTENT': None,
'DEFAULT_ZOOM': None,
'DEFAULT_CENTER': None,
'SRID': None,
'TILES_EXTENT': [],
'SCALE': 'metric',
'MINIMAP': False,
'RESET_VIEW': True,
'NO_GLOBALS': True,
'PLUGINS': OrderedDict(),
}, **LEAFLET_CONFIG)
# Backward-compatibility : defaults TILES with value of TILES_URL
if 'TILES_URL' in LEAFLET_CONFIG:
warnings.warn("TILES_URL is deprecated.", DeprecationWarning)
if 'TILES' in LEAFLET_CONFIG:
raise ImproperlyConfigured(_("Remove TILES_URL and keep TILES value."))
app_settings['TILES'] = [(app_settings['TILES_URL'])]
# If TILES is a string, convert to tuple
if isinstance(app_settings.get('TILES'), six.string_types):
app_settings['TILES'] = [(_('Background'), app_settings.get('TILES'), '')]
# Verify that scale setting is valid. For backwards-compatibility, interpret 'True' as 'metric'.
SCALE = app_settings.get("SCALE", None)
if SCALE is True:
app_settings["SCALE"] = 'metric'
elif SCALE not in ('metric', 'imperial', 'both', None, False):
raise ImproperlyConfigured("LEAFLET_CONFIG['SCALE'] must be True, False, None, 'metric', 'imperial' or 'both'.")
SPATIAL_EXTENT = app_settings.get("SPATIAL_EXTENT")
if SPATIAL_EXTENT is None:
# Deprecate lookup in global Django settings
if hasattr(settings, 'SPATIAL_EXTENT'):
warnings.warn("SPATIAL_EXTENT is deprecated. Use LEAFLET_CONFIG['SPATIAL_EXTENT'] instead.", DeprecationWarning)
SPATIAL_EXTENT = getattr(settings, 'SPATIAL_EXTENT', (-180, -90, 180, 90))
if SPATIAL_EXTENT is not None:
if not isinstance(SPATIAL_EXTENT, (tuple, list)) or len(SPATIAL_EXTENT) != 4:
raise ImproperlyConfigured(_("Spatial extent should be a tuple (minx, miny, maxx, maxy)"))
SRID = app_settings.get("SRID")
if SRID is None:
# Deprecate lookup in global Django settings
if hasattr(settings, 'MAP_SRID'):
warnings.warn("MAP_SRID is deprecated. Use LEAFLET_CONFIG['SRID'] instead.", DeprecationWarning)
if hasattr(settings, 'SRID'):
warnings.warn("SRID is deprecated. Use LEAFLET_CONFIG['SRID'] instead.", DeprecationWarning)
SRID = getattr(settings, 'MAP_SRID', getattr(settings, 'SRID', 3857))
if SRID == 3857: # Leaflet's default, do not setup custom projection machinery
SRID = None
TILES_EXTENT = app_settings.get("TILES_EXTENT")
# Due to bug in Leaflet/Proj4Leaflet ()
# landscape extents are not supported.
if SRID and TILES_EXTENT and (TILES_EXTENT[2] - TILES_EXTENT[0] > TILES_EXTENT[3] - TILES_EXTENT[1]):
raise ImproperlyConfigured('Landscape tiles extent not supported (%s).' % (TILES_EXTENT,))
DEFAULT_CENTER = app_settings['DEFAULT_CENTER']
if DEFAULT_CENTER is not None and not (isinstance(DEFAULT_CENTER, (list, tuple)) and len(DEFAULT_CENTER) == 2):
raise ImproperlyConfigured("LEAFLET_CONFIG['DEFAULT_CENTER'] must be an list/tuple with two elements - (lon, lat)")
DEFAULT_ZOOM = app_settings['DEFAULT_ZOOM']
if DEFAULT_ZOOM is not None and not (isinstance(DEFAULT_ZOOM, six.integer_types) and (1 <= DEFAULT_ZOOM <= 24)):
raise ImproperlyConfigured("LEAFLET_CONFIG['DEFAULT_ZOOM'] must be an int between 1 and 24.")
PLUGINS = app_settings['PLUGINS']
if not (isinstance(PLUGINS, dict) and all([isinstance(el, dict) for el in PLUGINS.values()])):
error_msg = """LEAFLET_CONFIG['PLUGINS'] must be dict of dicts in the format:
{ '[plugin_name]': { 'js': '[path-to-js]', 'css': '[path-to-css]' } } .)"""
raise ImproperlyConfigured(error_msg)
PLUGIN_ALL = 'ALL'
PLUGINS_DEFAULT = '__default__'
PLUGIN_FORMS = 'forms'
# Add plugins required for forms (not auto-included)
# Assets will be prepended to any existing entry in PLUGINS['forms']
_forms_js = ['leaflet/draw/leaflet.draw.js',
'leaflet/leaflet.extras.js',
'leaflet/leaflet.forms.js']
_forms_css = ['leaflet/draw/leaflet.draw.css']
_forms_plugins = PLUGINS.setdefault(PLUGIN_FORMS, {})
_forms_plugins['js'] = _forms_js + _forms_plugins.get('js', [])
_forms_plugins['css'] = _forms_css + _forms_plugins.get('css', [])
_forms_plugins.setdefault('auto-include', False)
PLUGINS[PLUGIN_FORMS] = _forms_plugins
# Take advantage of plugin system for Leaflet.MiniMap
if app_settings.get('MINIMAP'):
PLUGINS['minimap'] = {
'css': 'leaflet/Control.MiniMap.css',
'js': 'leaflet/Control.MiniMap.js',
'auto-include': True
}
def _normalize_plugins_config():
"""
Normalizes the PLUGINS setting:
* ensures the 'css' and 'js' are arrays of URLs
* ensures all URLs are transformed as follows:
** if the URL is absolute - leave it as-is
** if the URL is a root URL - starts with a / - leave it as-is
    ** if the URL is not a root URL - does not start with / - prepend settings.STATIC_URL
Also, adds a special key - ALL - that includes 'css' and 'js' for all plugins listed
"""
if '__is_normalized__' in PLUGINS: # already normalized
return
listed_plugins = list(PLUGINS.keys())
PLUGINS[PLUGINS_DEFAULT] = OrderedDict()
PLUGINS[PLUGIN_ALL] = OrderedDict()
RESOURCE_TYPE_KEYS = ['css', 'js']
for key in listed_plugins:
plugin_dict = PLUGINS[key]
for resource_type in RESOURCE_TYPE_KEYS:
# normalize the resource URLs
urls = plugin_dict.get(resource_type, None)
if isinstance(urls, (six.binary_type, six.string_types)):
urls = [urls]
elif isinstance(urls, tuple): # force to list
urls = list(urls)
elif isinstance(urls, list): # already a list
pass
else: # css/js has not been specified or the wrong type
urls = []
# normalize the URLs - see the docstring for details
for i, url in enumerate(urls):
url_parts = urlparse(url)
if url_parts.scheme or url_parts.path.startswith('/'):
# absolute URL or a URL starting at root
pass
else:
urls[i] = urljoin(settings.STATIC_URL, url)
plugin_dict[resource_type] = urls
# Append it to the DEFAULT pseudo-plugin if auto-include
if plugin_dict.get('auto-include', False):
PLUGINS[PLUGINS_DEFAULT].setdefault(resource_type, []).extend(urls)
# also append it to the ALL pseudo-plugin;
PLUGINS[PLUGIN_ALL].setdefault(resource_type, []).extend(urls)
PLUGINS['__is_normalized__'] = True
_normalize_plugins_config()
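# Illustrative sketch (not part of the original module): the plugin name and
# asset paths below are hypothetical and only show the shape of a PLUGINS
# entry that _normalize_plugins_config() accepts.
#
#   LEAFLET_CONFIG = {
#       'PLUGINS': {
#           'draw': {
#               'css': 'leaflet/draw/leaflet.draw.css',          # relative path: STATIC_URL is prepended
#               'js': ['/static/leaflet/draw/leaflet.draw.js'],  # root URL: left as-is
#               'auto-include': True,                            # also folded into the '__default__' set
#           },
#       },
#   }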
| lgpl-3.0 | -8,146,738,726,880,500,000 | 38.433862 | 120 | 0.655441 | false |
blackball/an-test6 | util/run_command.py | 1 | 1868 | import subprocess
import os
import select
from subprocess import PIPE
# Returns (rtn, out, err)
def run_command(cmd, timeout=None, callback=None, stdindata=None):
"""
Run a command and return the text written to stdout and stderr, plus
the return value.
Returns: (int return value, string out, string err)
"""
child = subprocess.Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(fin, fout, ferr) = (child.stdin, child.stdout, child.stderr)
stdin = fin.fileno()
stdout = fout.fileno()
stderr = ferr.fileno()
outbl = []
errbl = []
ineof = outeof = erreof = False
block = 1024
while True:
readers = []
writers = []
if not ineof: writers.append(stdin)
if not outeof: readers.append(stdout)
if not erreof: readers.append(stderr)
if not len(readers):
break
(ready_readers, ready_writers, _) = select.select(readers, writers, [], timeout)
if stdin in ready_writers and stdindata:
bytes_written = os.write(stdin, stdindata[:block])
stdindata = stdindata[bytes_written:]
if not stdindata:
fin.close()
ineof = True
if stdout in ready_readers:
outchunk = os.read(stdout, block)
if len(outchunk) == 0:
outeof = True
outbl.append(outchunk)
if stderr in ready_readers:
errchunk = os.read(stderr, block)
if len(errchunk) == 0:
erreof = True
errbl.append(errchunk)
if callback:
callback()
fout.close()
ferr.close()
w = child.wait()
out = ''.join(outbl)
err = ''.join(errbl)
if not os.WIFEXITED(w):
return (-100, out, err)
rtn = os.WEXITSTATUS(w)
return (rtn, out, err)
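# Minimal usage sketch (not part of the original module). The command string,
# stdin payload and timeout below are illustrative assumptions only; they just
# demonstrate the (rtn, out, err) return convention documented above.
if __name__ == '__main__':
    rtn, out, err = run_command('cat', timeout=5, stdindata='hello\n')
    print('exit status: %d' % rtn)
    print('stdout: %r' % out)
    print('stderr: %r' % err)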
| gpl-2.0 | 269,165,512,683,234,530 | 30.661017 | 99 | 0.574411 | false |
LandisTwo/veromix-plasmoid | gtk/Indicator.py | 3 | 7136 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Nik Lutz <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk
from Configuration import config
class Indicator:
def __init__(self, veromix):
self.window = veromix.window
self.veromix = veromix
self.menu = Gtk.Menu()
self.indicator = None
if config().get_indicator_type() != 'None':
self.install_menu()
self.install_quicklist()
self.connect_events()
def connect_events(self):
self.veromix.pa_proxy().connect("on_sink_info", self.on_sink_info, self.veromix)
def install_menu(self):
self.APPIND_SUPPORT = True
try: from gi.repository import AppIndicator3
except: self.APPIND_SUPPORT = False
if self.APPIND_SUPPORT and config().get_indicator_type() == 'AppIndicator':
self.indicator = AppIndicator3.Indicator.new("Veromix", "audio-volume-medium", AppIndicator3.IndicatorCategory.APPLICATION_STATUS)
self.indicator.set_status (AppIndicator3.IndicatorStatus.ACTIVE)
self.indicator.set_menu(self.menu)
self.indicator.connect("scroll-event", self.on_scroll_wheel)
# self.indicator.connect("menu-show", self.toggle_window)
toggle = Gtk.MenuItem()
toggle.set_label("Toggle Window")
toggle.connect("activate", self.toggle_window)
mute = Gtk.MenuItem()
mute.set_label("Mute")
mute.connect("activate", self.on_middle_click)
self.menu.append(mute)
self.indicator.set_secondary_activate_target(mute)
self.menu.append(toggle)
self.APPIND_SUPPORT = True
elif config().get_indicator_type() == 'GtkStatusIcon':
self.status_icon = Gtk.StatusIcon()
self.status_icon.set_from_icon_name("audio-volume-medium")
self.status_icon.connect('popup-menu', self.on_right_click_statusicon)
self.status_icon.connect("activate", self.toggle_window)
self.status_icon.connect('scroll_event', self.on_scroll_wheel)
self.status_icon.connect("button_press_event", self.on_status_icon_clicked)
self.APPIND_SUPPORT = False
quit = Gtk.MenuItem()
quit.set_label("Quit")
quit.connect("activate", Gtk.main_quit)
self.menu.append(quit)
self.menu.show_all()
def on_status_icon_clicked(self, widget, event):
if event.button == 2:
# if event.type == Gdk.EventType._2BUTTON_PRESS:
self.on_middle_click(event)
return True
return False
def on_middle_click(self, event, arg=None, data=None):
self.veromix.get_default_sink().toggle_mute()
def on_scroll_wheel(self, widget, event, value = None):
if self.APPIND_SUPPORT:
self.veromix.get_default_sink().step_volume((value == 0))
self.window.present()
else:
if event.direction == Gdk.ScrollDirection.DOWN or event.direction == Gdk.ScrollDirection.LEFT:
self.veromix.get_default_sink().step_volume(False)
if event.direction == Gdk.ScrollDirection.UP or event.direction == Gdk.ScrollDirection.RIGHT:
self.veromix.get_default_sink().step_volume(True)
def toggle_window(self, widget):
if not self.window.is_active():
self.window.present()
else:
self.window.hide()
def get_tray_menu(self):
return self.menu
def on_right_click_statusicon(self, icon, button, time):
self.get_tray_menu()
def pos(menu, aicon):
return (Gtk.StatusIcon.position_menu(menu, aicon))
self.menu.popup(None, None, pos, icon, button, time)
def on_sink_info(self, index, info, sink_box):
channel = sink_box.get_default_sink()
if config().get_indicator_type() != 'None':
if channel == None:
return
volume = channel.pa_sink_proxy().get_volume()
if channel.pa_sink_proxy().is_muted():
self.set_icon("audio-volume-muted")
elif volume > 75:
self.set_icon("audio-volume-high")
elif volume > 30:
self.set_icon("audio-volume-medium")
elif volume > -5:
self.set_icon("audio-volume-low")
if self.DBUSMENU_SUPPORT:
if channel.pa_sink_proxy().is_muted():
self.dbusmenu_mute.property_set_int(self.dbusmenu_checked[0], self.dbusmenu_checked[1])
else:
self.dbusmenu_mute.property_set_int(self.dbusmenu_unchecked[0], self.dbusmenu_unchecked[1])
def set_icon(self, iconname):
if self.APPIND_SUPPORT:
self.indicator.set_icon(iconname)
else:
self.status_icon.set_from_icon_name(iconname)
def install_quicklist(self):
self.DBUSMENU_SUPPORT = True
try:
from gi.repository import Unity, Dbusmenu
self.dbusmenu_checked = (Dbusmenu.MENUITEM_PROP_TOGGLE_STATE, Dbusmenu.MENUITEM_TOGGLE_STATE_CHECKED)
self.dbusmenu_unchecked = (Dbusmenu.MENUITEM_PROP_TOGGLE_STATE, Dbusmenu.MENUITEM_TOGGLE_STATE_UNCHECKED)
except:
self.DBUSMENU_SUPPORT = False
return
self.launcher = Unity.LauncherEntry.get_for_desktop_id("veromix.desktop")
self.quicklist = Dbusmenu.Menuitem.new()
self.dbusmenu_mute = Dbusmenu.Menuitem.new()
self.dbusmenu_mute.property_set(Dbusmenu.MENUITEM_PROP_LABEL, "Mute")
self.dbusmenu_mute.property_set(Dbusmenu.MENUITEM_PROP_TOGGLE_TYPE, Dbusmenu.MENUITEM_TOGGLE_CHECK)
self.dbusmenu_mute.property_set_int(Dbusmenu.MENUITEM_PROP_TOGGLE_STATE, Dbusmenu.MENUITEM_TOGGLE_STATE_UNCHECKED)
self.dbusmenu_mute.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
self.dbusmenu_mute.connect (Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.on_middle_click, None)
self.quicklist.child_append(self.dbusmenu_mute)
if not config().get_window_exit_on_close():
quit = Dbusmenu.Menuitem.new()
quit.property_set (Dbusmenu.MENUITEM_PROP_LABEL, "Shutdown Veromix")
quit.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
quit.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, Gtk.main_quit, None)
self.quicklist.child_append(quit)
self.launcher.set_property("quicklist", self.quicklist)
| gpl-3.0 | 801,493,414,747,905,500 | 43.6 | 142 | 0.637892 | false |
plypaul/airflow | tests/ti_deps/deps/dag_unpaused_dep.py | 20 | 1364 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from airflow.ti_deps.deps.dag_unpaused_dep import DagUnpausedDep
from fake_models import FakeDag, FakeTask, FakeTI
class DagUnpausedDepTest(unittest.TestCase):
def test_concurrency_reached(self):
"""
Test paused DAG should fail dependency
"""
dag = FakeDag(is_paused=True)
task = FakeTask(dag=dag)
ti = FakeTI(task=task, dag_id="fake_dag")
self.assertFalse(DagUnpausedDep().is_met(ti=ti, dep_context=None))
def test_all_conditions_met(self):
"""
Test all conditions met should pass dep
"""
dag = FakeDag(is_paused=False)
task = FakeTask(dag=dag)
ti = FakeTI(task=task, dag_id="fake_dag")
self.assertTrue(DagUnpausedDep().is_met(ti=ti, dep_context=None))
| apache-2.0 | -2,417,355,892,440,499,700 | 32.268293 | 74 | 0.68695 | false |
natefoo/tools-iuc | tools/ivar/prepare_amplicon_info.py | 5 | 1886 | #!/usr/bin/env python
# extends ivar trim's amplicon info parsing abilities
# to include calculation of amplicon regions from
# sets of nested (more than two) primers
import sys
# parse primers and their start positions from BED file
primer_starts = {}
with open(sys.argv[1]) as i:
for line in i:
f = line.strip().split('\t')
try:
if f[5] == '+':
primer_starts[f[3]] = int(f[1])
elif f[5] == '-':
primer_starts[f[3]] = int(f[2]) - 1
else:
raise ValueError()
except (IndexError, ValueError):
sys.exit(
'Primer BED file needs to be TAB-separated with the '
'following columns: '
'chrom, chromStart, chromEnd, name, score, strand, '
'where "chromStart", "chromEnd" need to be integer values '
'and "strand" needs to be either "+" or "-".'
)
# parse amplicon info and record outer primer names
with open(sys.argv[2]) as i:
ret_lines = []
for line in i:
first = last = None
for pname in line.strip().split('\t'):
try:
primer_start = primer_starts[pname]
except KeyError:
sys.exit(
'Amplicon info with primer name not found in '
f'primer BED file: "{pname}"'
)
if first is None or primer_start < primer_starts[first]:
first = pname
if last is None or primer_start > primer_starts[last]:
last = pname
if first == last:
sys.exit(
line
+ 'is not a proper amplicon info line.'
)
ret_lines.append(f'{first}\t{last}\n')
# write amended amplicon info
with open(sys.argv[3], 'w') as o:
o.writelines(ret_lines)
| mit | 3,584,726,299,554,713,000 | 32.087719 | 75 | 0.517497 | false |
M0ses/ansible | v2/ansible/plugins/inventory/directory.py | 16 | 1974 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (division, print_function)
__metaclass__ = type
import os
from . aggregate import InventoryAggregateParser
class InventoryDirectoryParser(InventoryAggregateParser):
def __init__(self, inven_directory):
directory = inven_directory
names = os.listdir(inven_directory)
        new_names = []
# Clean up the list of filenames
for filename in names:
# Skip files that end with certain extensions or characters
if any(filename.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
continue
# Skip hidden files
if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
continue
# These are things inside of an inventory basedir
if filename in ("host_vars", "group_vars", "vars_plugins"):
continue
fullpath = os.path.join(directory, filename)
new_names.append(fullpath)
super(InventoryDirectoryParser, self).__init__(new_names)
def parse(self):
return super(InventoryDirectoryParser, self).parse()
| gpl-3.0 | 2,258,044,622,359,435,800 | 36.961538 | 111 | 0.652989 | false |
guard163/xen-api | scripts/time-vm-boots.py | 11 | 5426 | #!/usr/bin/env python
# Copyright (c) 2006-2007 XenSource, Inc.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Simple python example to demonstrate the event system. Logs into the server,
# registers for events on the VM_guest_metrics and computes the time taken for
# the guest agent to report an IP address.
import XenAPI, sys, time, inventory
vgm_to_vm = {}
def register_vm_metrics(session, vm_ref, vgm):
global vgm_to_vm
try:
# avoid putting invalid references in the cache
tmp = session.xenapi.VM_guest_metrics.get_other(vgm)
vgm_to_vm[vgm] = vm_ref
except:
pass
def vm_of_metrics(session, ref):
global vgm_to_vm
if not(ref in vgm_to_vm.keys()):
return None
return vgm_to_vm[ref]
interesting_vms = []
vm_boot_times = {}
boots_seen = 0
def dump_table(session):
global vm_boot_times
for vm in vm_boot_times.keys():
name = session.xenapi.VM.get_name_label(vm)
print "%s %s" % (name, vm_boot_times[vm])
def seen_possible_boot(session, vm):
global vm_boot_times
global interesting_vms
global boots_seen
if not(vm in vm_boot_times.keys()) and vm in interesting_vms:
t = time.strftime( "%Y%m%dT%H:%M:%SZ", time.gmtime())
vm_boot_times[vm] = t
boots_seen = boots_seen + 1
name = session.xenapi.VM.get_name_label(vm)
print >>sys.stdout, "%d %s %s" % (boots_seen, name, t)
print >>sys.stderr, "%d %s %s" % (boots_seen, name, t)
sys.stderr.flush()
def process_guest_metrics(session, ref, snapshot):
if "other" in snapshot.keys():
other = snapshot["other"]
if "feature-shutdown" in other.keys():
vm = vm_of_metrics(session, ref)
seen_possible_boot(session, vm)
def poll_metrics(session):
while True:
time.sleep(10)
all = session.xenapi.VM_guest_metrics.get_all_records()
for ref in all.keys():
snapshot = all[ref]
process_guest_metrics(session, ref, snapshot)
def process_metrics_event(session, ref, snapshot):
vm_ref = vm_of_metrics(session, ref)
if vm_ref == None:
return
if session.xenapi.VM.get_power_state(vm_ref) <> "Running":
return
other = {}
try:
other=session.xenapi.VM_guest_metrics.get_other(ref)
except Exception, e:
print repr(e)
if "feature-shutdown" in other.keys():
seen_possible_boot(session, vm_ref)
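# Main loop: register for VM and VM_guest_metrics events and treat the first
# time a guest reports "feature-shutdown" as the moment it finished booting.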
def watch_events_on_vm(session):
try:
# Register for events on all classes:
session.xenapi.event.register(["VM","VM_guest_metrics"])
while True:
try:
events = session.xenapi.event.next()
for event in events:
if event['operation'] == 'del':
continue
if event['class'] == 'vm' and event['operation'] == 'mod':
register_vm_metrics(session, event['ref'], event['snapshot']['guest_metrics'])
continue
if event['class'] == 'vm_guest_metrics':
process_metrics_event(session, event['ref'], event['snapshot'])
continue
except XenAPI.Failure, e:
if e.details <> [ "EVENTS_LOST" ]: raise
print "** Caught EVENTS_LOST error: some events may be lost"
# Check for the "EVENTS_LOST" error (happens if the event queue fills up on the
# server and some events have been lost). The only thing we can do is to
# unregister and then re-register again for future events.
# NB: A program which is waiting for a particular condition to become true would
# need to explicitly poll the state to make sure the condition hasn't become
# true in the gap.
session.xenapi.event.unregister(["VM", "VM_guest_metrics"])
session.xenapi.event.register(["VM", "VM_guest_metrics"])
finally:
session.xenapi.session.logout()
if __name__ == "__main__":
if len(sys.argv) <> 1:
print "Usage:"
print sys.argv[0]
print " -- watches all offline VMs for boots"
sys.exit(1)
# First acquire a valid session by logging in:
session = XenAPI.xapi_local()
session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-timevmboots.py")
# We start watching all Halted VMs
all = session.xenapi.VM.get_all_records()
for vm in all.keys():
vm_rec = all[vm]
if vm_rec["power_state"] == "Halted" and vm_rec["is_a_template"] == False:
interesting_vms.append(vm)
print >> sys.stderr, "Watching %d offline VMs" % (len(interesting_vms))
#poll_metrics(session)
watch_events_on_vm(session)
| lgpl-2.1 | -3,798,214,336,379,238,400 | 34.933775 | 96 | 0.621821 | false |
bailabs/bench-v7 | bench/cli.py | 2 | 3464 | import click
import os, sys, logging, json, pwd, subprocess
from bench.utils import is_root, PatchError, drop_privileges, get_env_cmd, get_cmd_output, get_frappe
from bench.app import get_apps
from bench.config.common_site_config import get_config
from bench.commands import bench_command
logger = logging.getLogger('bench')
from_command_line = False
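# Entry point for the `bench` console script: dispatches to the bench command
# group, the frappe CLI, or an installed app's commands based on sys.argv.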
def cli():
global from_command_line
from_command_line = True
check_uid()
change_dir()
change_uid()
if len(sys.argv) > 2 and sys.argv[1] == "frappe":
return old_frappe_cli()
elif len(sys.argv) > 1 and sys.argv[1] in get_frappe_commands():
return frappe_cmd()
elif len(sys.argv) > 1 and sys.argv[1] in ("--site", "--verbose", "--force", "--profile"):
return frappe_cmd()
elif len(sys.argv) > 1 and sys.argv[1]=="--help":
print click.Context(bench_command).get_help()
print
print get_frappe_help()
return
elif len(sys.argv) > 1 and sys.argv[1] in get_apps():
return app_cmd()
else:
try:
# NOTE: this is the main bench command
bench_command()
except PatchError:
sys.exit(1)
def check_uid():
if cmd_requires_root() and not is_root():
print 'superuser privileges required for this command'
sys.exit(1)
def cmd_requires_root():
if len(sys.argv) > 2 and sys.argv[2] in ('production', 'sudoers', 'lets-encrypt', 'fonts', 'reload-nginx', 'firewall', 'ssh-port'):
return True
if len(sys.argv) >= 2 and sys.argv[1] in ('patch', 'renew-lets-encrypt', 'disable-production'):
return True
def change_dir():
if os.path.exists('config.json') or "init" in sys.argv:
return
dir_path_file = '/etc/frappe_bench_dir'
if os.path.exists(dir_path_file):
with open(dir_path_file) as f:
dir_path = f.read().strip()
if os.path.exists(dir_path):
os.chdir(dir_path)
def change_uid():
if is_root() and not cmd_requires_root():
frappe_user = get_config(".").get('frappe_user')
if frappe_user:
drop_privileges(uid_name=frappe_user, gid_name=frappe_user)
os.environ['HOME'] = pwd.getpwnam(frappe_user).pw_dir
else:
print 'You should not run this command as root'
sys.exit(1)
def old_frappe_cli(bench_path='.'):
f = get_frappe(bench_path=bench_path)
os.chdir(os.path.join(bench_path, 'sites'))
os.execv(f, [f] + sys.argv[2:])
def app_cmd(bench_path='.'):
f = get_env_cmd('python', bench_path=bench_path)
os.chdir(os.path.join(bench_path, 'sites'))
os.execv(f, [f] + ['-m', 'frappe.utils.bench_helper'] + sys.argv[1:])
def frappe_cmd(bench_path='.'):
f = get_env_cmd('python', bench_path=bench_path)
os.chdir(os.path.join(bench_path, 'sites'))
os.execv(f, [f] + ['-m', 'frappe.utils.bench_helper', 'frappe'] + sys.argv[1:])
def get_frappe_commands(bench_path='.'):
python = get_env_cmd('python', bench_path=bench_path)
sites_path = os.path.join(bench_path, 'sites')
if not os.path.exists(sites_path):
return []
try:
return json.loads(get_cmd_output("{python} -m frappe.utils.bench_helper get-frappe-commands".format(python=python), cwd=sites_path))
except subprocess.CalledProcessError:
return []
def get_frappe_help(bench_path='.'):
python = get_env_cmd('python', bench_path=bench_path)
sites_path = os.path.join(bench_path, 'sites')
if not os.path.exists(sites_path):
return []
try:
out = get_cmd_output("{python} -m frappe.utils.bench_helper get-frappe-help".format(python=python), cwd=sites_path)
return "Framework commands:\n" + out.split('Commands:')[1]
except subprocess.CalledProcessError:
return ""
| gpl-3.0 | 7,096,900,127,506,203,000 | 30.779817 | 134 | 0.681005 | false |
planetlabs/datalake | client/datalake/tests/conftest.py | 2 | 3844 | # Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import pytest
import random
import string
import os
import six
try:
from moto import mock_s3
import boto.s3
from boto.s3.key import Key
from six.moves.urllib.parse import urlparse
import simplejson as json
except ImportError:
# if developers use s3-test features without having installed s3 stuff,
# things will fail. So it goes.
pass
@pytest.fixture
def basic_metadata():
return {
'version': 0,
'start': 1426809600000,
'end': 1426895999999,
'path': '/var/log/apache/access.log',
'where': 'nebraska',
'what': 'apache',
'hash': '12345',
'work_id': None,
}
def random_word(length):
if six.PY2:
lowercase = string.lowercase
else:
lowercase = string.ascii_lowercase
return ''.join(random.choice(lowercase) for i in range(length))
def random_hex(length):
return ('%0' + str(length) + 'x') % random.randrange(16**length)
def random_interval():
year_2010 = 1262304000000
five_years = 5 * 365 * 24 * 60 * 60 * 1000
three_days = 3 * 24 * 60 * 60 * 1000
start = year_2010 + random.randint(0, five_years)
end = start + random.randint(0, three_days)
return start, end
def random_work_id():
if random.randint(0, 1):
return None
return '{}-{}'.format(random_word(5), random.randint(0, 2**15))
def random_abs_dir():
num_dirs = random.randrange(1, 4)
lengths = [random.randint(2, 10) for i in range(num_dirs)]
dirs = [random_word(i) for i in lengths]
return '/' + '/'.join(dirs)
@pytest.fixture
def random_metadata():
start, end = random_interval()
what = random_word(10)
return {
'version': 0,
'start': start,
'end': end,
'path': os.path.join(random_abs_dir(), what),
'work_id': random_work_id(),
'where': random_word(10),
'what': what,
'id': random_hex(40),
'hash': random_hex(40),
}
@pytest.fixture
def tmpfile(tmpdir):
name = random_word(10)
def get_tmpfile(content):
f = tmpdir.join(name)
f.write(content)
return str(f)
return get_tmpfile
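# The AWS fixtures below start a moto mock (an in-process fake of the service),
# register its teardown with pytest, and hand back a boto connection to tests.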
@pytest.fixture
def aws_connector(request):
def create_connection(mocker, connector):
mock = mocker()
mock.start()
def tear_down():
mock.stop()
request.addfinalizer(tear_down)
return connector()
return create_connection
@pytest.fixture
def s3_connection(aws_connector):
return aws_connector(mock_s3, boto.connect_s3)
@pytest.fixture
def s3_bucket_maker(s3_connection):
def maker(bucket_name):
return s3_connection.create_bucket(bucket_name)
return maker
@pytest.fixture
def s3_file_maker(s3_bucket_maker):
def maker(bucket, key, content, metadata):
b = s3_bucket_maker(bucket)
k = Key(b)
k.key = key
if metadata:
k.set_metadata('datalake', json.dumps(metadata))
k.set_contents_from_string(content)
return maker
@pytest.fixture
def s3_file_from_metadata(s3_file_maker):
def maker(url, metadata):
url = urlparse(url)
assert url.scheme == 's3'
s3_file_maker(url.netloc, url.path, '', metadata)
return maker
| apache-2.0 | -2,942,168,831,401,576,400 | 22.439024 | 79 | 0.629553 | false |
CounterpartyXCP/counterparty-lib | tools/updatetxids.py | 2 | 2041 | #!/usr/bin/python3
import sys
import os
import re
import shutil
import pprint
import binascii
COMMIT = "8906a8188ba841599f66627157e29a270ca838cf"
UNITTEST_FIXTURE_SQL = "counterpartylib/test/fixtures/scenarios/unittest_fixture.sql"
UNITTEST_VECTORS_PY = "counterpartylib/test/fixtures/vectors.py"
REGEX = r"^(?P<change>[+-])INSERT INTO transactions VALUES\((?P<tx_index>\d+),'(?P<tx_hash>.+?)',"
dryrun = '--dry-run' in sys.argv or '--dryrun' in sys.argv
args = list(filter(lambda a: a not in [__file__, '--dry-run', '--dryrun'], sys.argv))
diffcmd = 'git --no-pager diff %s' % UNITTEST_FIXTURE_SQL
if len(args) == 1:
commit = args[0]
diffcmd = 'git --no-pager show %s %s' % (commit, UNITTEST_FIXTURE_SQL)
elif len(args) > 1:
raise Exception("Too many arguments")
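# Render a hex txid as the escaped byte-string literal form used inside
# vectors.py, so literal byte-string occurrences of old txids get replaced too.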
def to_literal_byte_string(h):
r = ""
for x in binascii.unhexlify(h):
if x >= 32 and x <= 126:
# print(x, "[%s]" % chr(x))
r += chr(x)
else:
# print(x, hex(x), "\\x" + ("00" + hex(x).replace('0x', ''))[-2:])
r += "\\x" + ("00" + hex(x).replace('0x', ''))[-2:]
return r
old_txid_map = {}
new_txid_map = {}
with os.popen(diffcmd) as diff:
lines = diff.readlines()
for line in lines:
m = re.match(REGEX, line)
if m:
if m.group('change') == '+':
new_txid_map[m.group('tx_index')] = m.group('tx_hash')
else:
old_txid_map[m.group('tx_index')] = m.group('tx_hash')
with open(UNITTEST_VECTORS_PY, 'r') as f:
filedata = f.read()
for tx_index, old_txid in sorted(old_txid_map.items(), key=lambda kv: kv[0]):
new_txid = new_txid_map[tx_index]
print("%s -> %s" % (old_txid, new_txid))
filedata = filedata.replace(old_txid, new_txid)
filedata = filedata.replace(to_literal_byte_string(old_txid), to_literal_byte_string(new_txid))
if not dryrun:
assert filedata
with open(UNITTEST_VECTORS_PY, 'w') as f:
f.write(filedata)
else:
print("DRYRUN")
| mit | 8,013,743,029,428,066,000 | 27.347222 | 103 | 0.585007 | false |
jalavik/invenio | invenio/modules/upgrader/upgrades/invenio_2013_03_18_bibauthorid_search_engine_tables.py | 15 | 1604 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import warnings
from invenio.legacy.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
return "Creates the bibauthorid search engine tables"
def do_upgrade():
warnings.filterwarnings('ignore')
run_sql("""CREATE TABLE IF NOT EXISTS `aidDENSEINDEX` (
`name_id` INT( 10 ) NOT NULL,
`person_name` VARCHAR( 256 ) NOT NULL,
`personids` LONGBLOB NOT NULL,
PRIMARY KEY (`name_id`)
) ENGINE=MyISAM""")
run_sql("""CREATE TABLE IF NOT EXISTS `aidINVERTEDLISTS` (
`qgram` VARCHAR( 4 ) NOT NULL,
`inverted_list` LONGBLOB NOT NULL,
`list_cardinality` INT( 10 ) NOT NULL,
PRIMARY KEY (`qgram`)
) ENGINE=MyISAM""")
def estimate():
return 1
| gpl-2.0 | 5,441,626,564,833,689,000 | 33.869565 | 74 | 0.65212 | false |
beni55/django | tests/test_client_regress/views.py | 25 | 5067 | import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
from django.test.utils import setup_test_environment
from django.utils.six.moves.urllib.parse import urlencode
class CustomTestException(Exception):
pass
def no_template_view(request):
"A simple view that expects a GET request, and returns a rendered template"
return HttpResponse("No template used. Sample content: twice once twice. Content ends.")
def staff_only_view(request):
"A view that can only be visited by staff. Non staff members get an exception"
if request.user.is_staff:
return HttpResponse('')
else:
raise CustomTestException()
def get_view(request):
"A simple login protected view"
return HttpResponse("Hello world")
get_view = login_required(get_view)
def request_data(request, template='base.html', data='sausage'):
"A simple view that returns the request data in the context"
return render_to_response(template, {
'get-foo': request.GET.get('foo'),
'get-bar': request.GET.get('bar'),
'post-foo': request.POST.get('foo'),
'post-bar': request.POST.get('bar'),
'data': data,
})
def view_with_argument(request, name):
"""A view that takes a string argument
The purpose of this view is to check that if a space is provided in
the argument, the test framework unescapes the %20 before passing
the value to the view.
"""
if name == 'Arthur Dent':
return HttpResponse('Hi, Arthur')
else:
return HttpResponse('Howdy, %s' % name)
def nested_view(request):
"""
A view that uses test client to call another view.
"""
setup_test_environment()
c = Client()
c.get("/no_template_view")
return render_to_response('base.html', {'nested': 'yes'})
def login_protected_redirect_view(request):
"A view that redirects all requests to the GET view"
return HttpResponseRedirect('/get_view/')
login_protected_redirect_view = login_required(login_protected_redirect_view)
def redirect_to_self_with_changing_query_view(request):
query = request.GET.copy()
query['counter'] += '0'
return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))
def set_session_view(request):
"A view that sets a session variable"
request.session['session_var'] = 'YES'
return HttpResponse('set_session')
def check_session_view(request):
"A view that reads a session variable"
return HttpResponse(request.session.get('session_var', 'NO'))
def request_methods_view(request):
"A view that responds with the request method"
return HttpResponse('request method: %s' % request.method)
def return_unicode(request):
return render_to_response('unicode.html')
def return_undecodable_binary(request):
return HttpResponse(
b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
)
def return_json_file(request):
"A view that parses and returns a JSON string as a file."
match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
# This just checks that the uploaded data is JSON
obj_dict = json.loads(request.body.decode(charset))
obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)
response = HttpResponse(obj_json.encode(charset), status=200,
content_type='application/json; charset=%s' % charset)
response['Content-Disposition'] = 'attachment; filename=testfile.json'
return response
def check_headers(request):
"A view that responds with value of the X-ARG-CHECK header"
return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))
def body(request):
"A view that is requested with GET and accesses request.body. Refs #14753."
return HttpResponse(request.body)
def read_all(request):
"A view that is requested with accesses request.read()."
return HttpResponse(request.read())
def read_buffer(request):
"A view that is requested with accesses request.read(LARGE_BUFFER)."
return HttpResponse(request.read(99999))
def request_context_view(request):
# Special attribute that won't be present on a plain HttpRequest
request.special_path = request.path
return render_to_response('request_context.html', context_instance=RequestContext(request, {}))
def render_template_multiple_times(request):
"""A view that renders a template multiple times."""
return HttpResponse(
render_to_string('base.html') + render_to_string('base.html'))
| bsd-3-clause | -6,422,591,148,635,837,000 | 31.273885 | 100 | 0.707914 | false |
wenluma/swift | utils/gyb_syntax_support/Node.py | 6 | 2271 | from __future__ import print_function
import sys # noqa: I201
from kinds import SYNTAX_BASE_KINDS, kind_to_type, lowercase_first_word
def error(msg):
print('error: ' + msg, file=sys.stderr)
sys.exit(-1)
class Node(object):
"""
A Syntax node, possibly with children.
If the kind is "SyntaxCollection", then this node is considered a Syntax
Collection that will expose itself as a typedef rather than a concrete
subclass.
"""
def __init__(self, name, kind=None, children=None,
element=None, element_name=None):
self.syntax_kind = name
self.swift_syntax_kind = lowercase_first_word(name)
self.name = kind_to_type(self.syntax_kind)
self.children = children or []
self.base_kind = kind
self.base_type = kind_to_type(self.base_kind)
if self.base_kind not in SYNTAX_BASE_KINDS:
error("unknown base kind '%s' for node '%s'" %
(self.base_kind, self.syntax_kind))
self.collection_element = element or ""
# If there's a preferred name for the collection element that differs
# from its supertype, use that.
self.collection_element_name = element_name or self.collection_element
self.collection_element_type = kind_to_type(self.collection_element)
def is_base(self):
"""
Returns `True` if this node declares one of the base syntax kinds.
"""
return self.syntax_kind in SYNTAX_BASE_KINDS
def is_syntax_collection(self):
"""
Returns `True` if this node is a subclass of SyntaxCollection.
"""
return self.base_kind == "SyntaxCollection"
def requires_validation(self):
"""
        Returns `True` if this node should have a `validate` method associated.
"""
return self.is_buildable()
def is_unknown(self):
"""
Returns `True` if this node is an `Unknown` syntax subclass.
"""
return "Unknown" in self.syntax_kind
def is_buildable(self):
"""
Returns `True` if this node should have a builder associated.
"""
return not self.is_base() and \
not self.is_unknown() and \
not self.is_syntax_collection()
| mit | -3,460,415,450,553,501,700 | 31.913043 | 79 | 0.613827 | false |
tanglei528/horizon | openstack_dashboard/dashboards/admin/images/tests.py | 9 | 4831 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.images import tables
class ImageCreateViewTest(test.BaseAdminViewTests):
def test_admin_image_create_view_uses_admin_template(self):
res = self.client.get(
reverse('horizon:admin:images:create'))
self.assertTemplateUsed(res, 'admin/images/create.html')
class ImagesViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_images_list(self):
filters = {'is_public': None}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters) \
.AndReturn([self.images.list(),
False])
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:admin:images:index'))
self.assertTemplateUsed(res, 'admin/images/index.html')
self.assertEqual(len(res.context['images_table'].data),
len(self.images.list()))
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_images_list_get_pagination(self):
images = self.images.list()[:5]
filters = {'is_public': None}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters) \
.AndReturn([images,
True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters) \
.AndReturn([images[:2],
True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[2].id,
paginate=True,
filters=filters) \
.AndReturn([images[2:4],
True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[4].id,
paginate=True,
filters=filters) \
.AndReturn([images[4:],
True])
self.mox.ReplayAll()
url = reverse('horizon:admin:images:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['images_table'].data),
len(images))
self.assertTemplateUsed(res, 'admin/images/index.html')
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "?".join([reverse('horizon:admin:images:index'),
"=".join([tables.AdminImagesTable._meta.pagination_param,
images[2].id])])
res = self.client.get(url)
# get second page (items 2-4)
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "?".join([reverse('horizon:admin:images:index'),
"=".join([tables.AdminImagesTable._meta.pagination_param,
images[4].id])])
res = self.client.get(url)
# get third page (item 5)
self.assertEqual(len(res.context['images_table'].data),
1)
| apache-2.0 | -2,142,590,549,254,058,200 | 42.133929 | 78 | 0.526392 | false |
sloanyang/android_external_webkit | Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py | 15 | 2560 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cPickle
import test_failures
class TestResult(object):
"""Data object containing the results of a single test."""
@staticmethod
def loads(str):
return cPickle.loads(str)
def __init__(self, filename, failures=None, test_run_time=None):
self.filename = filename
self.failures = failures or []
self.test_run_time = test_run_time or 0
# FIXME: Setting this in the constructor makes this class hard to mutate.
self.type = test_failures.determine_result_type(failures)
def __eq__(self, other):
return (self.filename == other.filename and
self.failures == other.failures and
self.test_run_time == other.test_run_time)
def __ne__(self, other):
return not (self == other)
def has_failure_matching_types(self, types):
for failure in self.failures:
if type(failure) in types:
return True
return False
def dumps(self):
return cPickle.dumps(self)
| gpl-2.0 | 207,002,302,706,679,140 | 39.634921 | 81 | 0.715234 | false |
za-creature/puls | puls/models/targets.py | 1 | 2599 | # coding=utf-8
from __future__ import absolute_import, unicode_literals, division
from puls.models import auto_modified, Searchable, ReferenceField
from puls import app
import mongoengine as mge
import flask_wtf
import datetime
import wtforms as wtf
@auto_modified
class Target(app.db.Document, Searchable):
meta = {"indexes": [
[("name", "text"), ("description", "text")],
]}
name = mge.StringField(required=True, max_length=256, unique=True)
icon = mge.StringField(required=True, max_length=256)
description = mge.StringField(default="", max_length=4096)
# dates
created = mge.DateTimeField(default=datetime.datetime.now)
modified = mge.DateTimeField(default=datetime.datetime.now)
class TargetForm(flask_wtf.Form):
name = wtf.TextField("Name", [wtf.validators.Required(),
wtf.validators.Length(max=256)])
icon = wtf.TextField("Glyphicon", [wtf.validators.Required(),
wtf.validators.Length(max=256)])
description = wtf.TextAreaField("Description",
[wtf.validators.Length(max=4096)])
class TargetField(ReferenceField):
reference_class = Target
class TargetWeightSpec(app.db.EmbeddedDocument):
target = mge.ReferenceField(Target, required=True)
value = mge.FloatField(required=True)
class TargetWeightSpecForm(flask_wtf.Form):
target = TargetField("Target", [wtf.validators.InputRequired()])
value = wtf.FloatField("Weight", [wtf.validators.InputRequired()])
class TargetWeightField(wtf.FieldList):
def __init__(self, *args, **kwargs):
ctor = super(TargetWeightField, self).__init__
ctor(wtf.FormField(TargetWeightSpecForm),
*args, **kwargs)
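    # Make sure the field always exposes one weight entry per Target document,
    # adding any missing targets with a default weight of 0.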
def process(self, formdata, data=[]):
targets = set()
new = []
try:
for entry in data:
if isinstance(entry, TargetWeightSpec):
new.append(entry)
targets.add(entry.target)
except TypeError:
pass
for target in Target.objects:
if target not in targets:
new.append(TargetWeightSpec(target=target, value=0))
super(TargetWeightField, self).process(formdata, new)
def populate_obj(self, obj, name):
if isinstance(obj, (app.db.Document, app.db.EmbeddedDocument)):
setattr(obj, name, [TargetWeightSpec(**entry.data)
for entry in self])
else:
super(TargetWeightField, self).populate_obj(obj, name)
| mit | -78,020,548,815,455,340 | 32.320513 | 71 | 0.630242 | false |
jfmorcillo/mss | modules/mds_kerberos/__init__.py | 3 | 3789 | # -*- coding: UTF-8 -*-
#
# (c) 2010 Mandriva, http://www.mandriva.com/
#
# $Id$
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import re
import os
import subprocess
def get_config_info():
return ("setup-kerberos.sh", ["realm", "dns_domain_name","kdc_host_name", "kdc_port", "kadmin_port", "kdc_key_passwd",
"dns_lookup_kdc", "dns_lookup_realm", "tgs_enctypes", "tkt_enctypes", "permitted_enctypes","allow_weak_enctypes", "clock_skew"])
def get_current_config(module):
config = {}
KRB_CONF = "/etc/krb5.conf"
KRB_CONF_PARAMS = {
"^\s*default_realm\s*=\s*(.*)" : ["realm"],
"^\s*admin_server\s*=\s*([a-zA-Z0-9_]+)\.([a-zA-Z0-9_.]+):(\d+)" : ["kdc_host_name", "dns_domain_name", "kadmin_port"],
"^\s*default_tgs_enctypes\s*=\s*(.*)" : ["tgs_enctypes"],
"^\s*default_tkt_enctypes\s*=\s*(.*)" : ["tkt_enctypes"],
"^\s*permitted_enctypes\s*=\s*(.*)" : ["permitted_enctypes"],
"^\s*clockskew\s*=\s*(.*)" : ["clock_skew"],
"^\s*allow_weak_crypto\s*=\s*(.*)" : ["allow_weak_enctypes"],
"^\s*dns_lookup_kdc\s*=\s*(.*)" : ["dns_lookup_kdc"],
"^\s*dns_lookup_realm\s*=\s*(.*)" : ["dns_lookup_realm"]
}
KDC_CONF = "/etc/kerberos/krb5kdc/kdc.conf"
KDC_CONF_PARAMS = { "^\s*kdc_ports\s*=\s*(\d+)" : ["kdc_port"]}
config = get_config_from_file(KRB_CONF, KRB_CONF_PARAMS)
config.update(get_config_from_file(KDC_CONF, KDC_CONF_PARAMS))
for key in config.keys():
if config[key] == "true":
config[key] = "on"
elif config[key] == "false":
config[key] = "off"
return config
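# configMap maps a regex (with capture groups) to the config keys that the
# captured values are stored under; parsing stops once every key has been seen.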
def get_config_from_file(file, configMap):
config = {}
if os.path.exists(file):
f = open(file)
data = f.read()
f.close()
maxParamsCount = 0
for regexp, values in configMap.items():
maxParamsCount += len(values)
paramsCount = 0
for line in data.split("\n"):
for regexp, values in configMap.items():
matches = re.match(regexp,line)
if matches:
groups = matches.groups()
for index in range(len(groups)):
config[values[index]] = groups[index]
paramsCount += 1
break
if (paramsCount == maxParamsCount):
break
return config
def get_default_domain_name():
return subprocess.Popen(["dnsdomainname"], stdout=subprocess.PIPE).communicate()[0].strip()
def get_default_host_name():
return subprocess.Popen(["hostname","-s"], stdout=subprocess.PIPE).communicate()[0].strip()
def get_default_realm():
return get_default_domain_name().upper()
def check_realm(realm):
if not re.match('^[A-Z0-9-\.]+\.[A-Z]{2,}$', realm):
return "Incorrect realm."
return None
def check_kdc_host_name(host_name):
if not re.match('^[a-zA-Z0-9-]+$', host_name):
return "Incorrect host name."
return None
def check_port(port):
if not re.match('^([0-5]?\d?\d?\d?\d|6[0-4]\d\d\d|65[0-4]\d\d|655[0-2]\d|6553[0-5])$', port):
return "Incorrect port number."
return None
def check_configured():
checker = "%s/check_configured.sh" % os.path.dirname(os.path.abspath(__file__))
code = subprocess.call([checker])
return bool(code)
| gpl-3.0 | -5,281,570,677,227,283,000 | 32.530973 | 132 | 0.630773 | false |
ArthurGarnier/SickRage | sickbeard/providers/nyaa.py | 5 | 4609 | # coding=utf-8
# Author: Mr_Orange
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
from sickbeard import logger, tvcache
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class NyaaProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, 'Nyaa')
self.public = True
self.supports_absolute_numbering = True
self.anime_only = True
self.url = 'https://nyaa.si'
self.minseed = 0
self.minleech = 0
self.confirmed = False
self.cache = tvcache.TVCache(self, min_time=20) # only poll Nyaa every 20 minutes max
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
if self.show and not self.show.is_anime:
return results
for mode in search_strings:
items = []
logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log('Search string: {0}'.format(search_string.decode('utf-8')), logger.DEBUG)
search_params = {
'page': 'rss',
'c': '1_0', # Category: All anime
's': 'id', # Sort by: 'id'=Date / 'size' / 'name' / 'seeders' / 'leechers' / 'downloads'
'o': 'desc', # Sort direction: asc / desc
'f': ('0', '2')[self.confirmed] # Quality filter: 0 = None / 1 = No Remakes / 2 = Trusted Only
}
if mode != 'RSS':
search_params['q'] = search_string
data = self.cache.get_rss_feed(self.url, params=search_params)['entries']
if not data:
logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
continue
for curItem in data:
try:
title = curItem['title']
download_url = curItem['link']
if not all([title, download_url]):
continue
seeders = try_int(curItem['nyaa_seeders'])
leechers = try_int(curItem['nyaa_leechers'])
torrent_size = curItem['nyaa_size']
info_hash = curItem['nyaa_infohash']
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log('Discarding torrent because it doesn\'t meet the'
' minimum seeders or leechers: {0} (S:{1} L:{2})'.format
(title, seeders, leechers), logger.DEBUG)
continue
size = convert_size(torrent_size, units=['BYTES', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']) or -1
result = {'title': title, 'link': download_url, 'size': size,
'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
if mode != 'RSS':
logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
(title, seeders, leechers), logger.DEBUG)
items.append(result)
except StandardError:
continue
# For each search mode sort all the items by seeders
items.sort(key=lambda d: d.get('seeders', 0), reverse=True)
results += items
return results
provider = NyaaProvider()
| gpl-3.0 | 6,404,418,442,151,125,000 | 39.787611 | 115 | 0.536125 | false |
awalls-cx18/gnuradio | gr-dtv/python/dtv/__init__.py | 7 | 1262 | #
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# The presence of this file turns this directory into a Python package
'''
Blocks and utilities for digital TV module.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
import os
try:
from .dtv_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from .dtv_swig import *
# Import pure python code here
from .atsc_rx import *
| gpl-3.0 | -6,107,475,120,239,251,000 | 30.55 | 70 | 0.729794 | false |
PythonCharmers/autoporter | pypi_scanner.py | 1 | 2078 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
import caniusepython3 as ciu
from . import utils
from . import classifier_finder
class pypi_scanner(object):
def __init__(self):
pass
def _get_all_github_packages(self):
with ciu.pypi.pypi_client() as client:
list_of_packages = client.search({'home_page': 'github'})
list_of_package_names = [v['name'] for v in list_of_packages]
return list_of_package_names
def _browse_classifier(self, classifiers):
"""
classifiers - list of classifiers
"""
with ciu.pypi.pypi_client() as client:
list_of_packages = client.browse(list(classifiers))
list_of_package_names = [v[0] for v in list_of_packages]
return list_of_package_names
def _get_all_python_packages(self):
with ciu.pypi.pypi_client() as client:
list_of_package_names = client.list_packages()
return list_of_package_names
def _get_all_python3_packages(self):
c = classifier_finder.classifier_finder('Programming Language :: Python :: 3')
python_classifiers = c.get_classifiers()
return self._browse_classifier(python_classifiers)
def _get_python2_only_packages(self):
"""
returns a list of all PyPI packages that
is python 2 compatible only.
"""
python_packages = set(self._get_all_python_packages())
python3_packages = set(self._get_all_python3_packages())
python2_only_packages = python_packages.difference(python3_packages)
return list(python2_only_packages)
def get_python2_github_packages(self):
"""
returns a list of python 2 only packages with github repos
"""
python2_only_packages = set(self._get_python2_only_packages())
github_packages = set(self._get_all_github_packages())
python2_github_packages = python2_only_packages.intersection(github_packages)
return list(python2_github_packages) | mit | 5,686,388,699,354,582,000 | 32.532258 | 86 | 0.638595 | false |
Johnzero/erp | openerp/addons/lunch/__init__.py | 9 | 1098 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import lunch
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -9,005,595,916,774,862,000 | 41.230769 | 79 | 0.615665 | false |
johan--/Geotrek | geotrek/common/tests/test_conf.py | 3 | 1552 | # -*- encoding: utf-8 -*-
import os
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from geotrek.settings import EnvIniReader
from .. import check_srid_has_meter_unit
class StartupCheckTest(TestCase):
def test_error_is_raised_if_srid_is_not_meters(self):
delattr(check_srid_has_meter_unit, '_checked')
with self.settings(SRID=4326):
self.assertRaises(ImproperlyConfigured, check_srid_has_meter_unit, None)
class EnvIniTests(TestCase):
ini_file = os.path.join('conf.ini')
def setUp(self):
with open(self.ini_file, 'w') as f:
f.write("""[settings]\nkey = value\nkeyint = 3\nlist = a, b,c\nfloats = 0.4 ,1.3""")
self.envini = EnvIniReader(self.ini_file)
os.environ['KEYINT'] = '4'
def test_existing_key(self):
self.assertEqual(self.envini.get('key'), 'value')
self.assertEqual(self.envini.get('keyint'), '4')
self.assertEqual(self.envini.get('keyint', env=False), '3')
def test_missing_key(self):
self.assertEqual(self.envini.get('unknown', 'void'), 'void')
self.assertEqual(self.envini.get('unknown', None), None)
self.assertRaises(ImproperlyConfigured, self.envini.get, 'unknown')
def test_helpers(self):
self.assertEqual(self.envini.getint('keyint'), 4)
self.assertEqual(self.envini.getstrings('list'), ['a', 'b', 'c'])
self.assertEqual(self.envini.getfloats('floats'), [0.4, 1.3])
def tearDown(self):
os.remove(self.ini_file)
| bsd-2-clause | -3,658,385,541,986,518,500 | 34.272727 | 96 | 0.652706 | false |
Megacoin2/Megacoin | contrib/pyminer/pyminer.py | 1 | 6478 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Copyright (c) 2013-2079 Dr. Kimoto Chan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class MegacoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
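# Byte-swap each 32-bit word of the buffer; getwork returns the block header
# with every word in the opposite byte order to what hashlib's sha256 expects.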
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
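# Reverse the order of the 32-bit words in the buffer without changing the
# byte order inside each word (used before comparing the final hash to target).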
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = MegacoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7950
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit | -9,210,656,487,868,313,000 | 24.604743 | 84 | 0.649429 | false |
gylian/sickrage | sickbeard/common.py | 1 | 14405 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os.path
import operator
import platform
import re
import uuid
INSTANCE_ID = str(uuid.uuid1())
USER_AGENT = ('SickRage/(' + platform.system() + '; ' + platform.release() + '; ' + INSTANCE_ID + ')')
mediaExtensions = ['avi', 'mkv', 'mpg', 'mpeg', 'wmv',
'ogm', 'mp4', 'iso', 'img', 'divx',
'm2ts', 'm4v', 'ts', 'flv', 'f4v',
'mov', 'rmvb', 'vob', 'dvr-ms', 'wtv',
'ogv', '3gp', 'webm', 'tp']
subtitleExtensions = ['srt', 'sub', 'ass', 'idx', 'ssa']
cpu_presets = {'HIGH': 0.1,
'NORMAL': 0.05,
'LOW': 0.01
}
### Other constants
MULTI_EP_RESULT = -1
SEASON_RESULT = -2
### Notification Types
NOTIFY_SNATCH = 1
NOTIFY_DOWNLOAD = 2
NOTIFY_SUBTITLE_DOWNLOAD = 3
NOTIFY_GIT_UPDATE = 4
NOTIFY_GIT_UPDATE_TEXT = 5
notifyStrings = {}
notifyStrings[NOTIFY_SNATCH] = "Started Download"
notifyStrings[NOTIFY_DOWNLOAD] = "Download Finished"
notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD] = "Subtitle Download Finished"
notifyStrings[NOTIFY_GIT_UPDATE] = "SickRage Updated"
notifyStrings[NOTIFY_GIT_UPDATE_TEXT] = "SickRage Updated To Commit#: "
### Episode statuses
UNKNOWN = -1 # should never happen
UNAIRED = 1 # episodes that haven't aired yet
SNATCHED = 2 # qualified with quality
WANTED = 3 # episodes we don't have but want to get
DOWNLOADED = 4 # qualified with quality
SKIPPED = 5 # episodes we don't want
ARCHIVED = 6 # episodes that you don't have locally (counts toward download completion stats)
IGNORED = 7 # episodes that you don't want included in your download stats
SNATCHED_PROPER = 9 # qualified with quality
SUBTITLED = 10 # qualified with quality
FAILED = 11 #episode downloaded or snatched we don't want
SNATCHED_BEST = 12 # episode redownloaded using best quality
NAMING_REPEAT = 1
NAMING_EXTEND = 2
NAMING_DUPLICATE = 4
NAMING_LIMITED_EXTEND = 8
NAMING_SEPARATED_REPEAT = 16
NAMING_LIMITED_EXTEND_E_PREFIXED = 32
multiEpStrings = {}
multiEpStrings[NAMING_REPEAT] = "Repeat"
multiEpStrings[NAMING_SEPARATED_REPEAT] = "Repeat (Separated)"
multiEpStrings[NAMING_DUPLICATE] = "Duplicate"
multiEpStrings[NAMING_EXTEND] = "Extend"
multiEpStrings[NAMING_LIMITED_EXTEND] = "Extend (Limited)"
multiEpStrings[NAMING_LIMITED_EXTEND_E_PREFIXED] = "Extend (Limited, E-prefixed)"
class Quality:
NONE = 0 # 0
SDTV = 1 # 1
SDDVD = 1 << 1 # 2
HDTV = 1 << 2 # 4
RAWHDTV = 1 << 3 # 8 -- 720p/1080i mpeg2 (trollhd releases)
FULLHDTV = 1 << 4 # 16 -- 1080p HDTV (QCF releases)
HDWEBDL = 1 << 5 # 32
FULLHDWEBDL = 1 << 6 # 64 -- 1080p web-dl
HDBLURAY = 1 << 7 # 128
FULLHDBLURAY = 1 << 8 # 256
# put these bits at the other end of the spectrum, far enough out that they shouldn't interfere
UNKNOWN = 1 << 15 # 32768
qualityStrings = {NONE: "N/A",
UNKNOWN: "Unknown",
SDTV: "SD TV",
SDDVD: "SD DVD",
HDTV: "HD TV",
RAWHDTV: "RawHD TV",
FULLHDTV: "1080p HD TV",
HDWEBDL: "720p WEB-DL",
FULLHDWEBDL: "1080p WEB-DL",
HDBLURAY: "720p BluRay",
FULLHDBLURAY: "1080p BluRay"}
statusPrefixes = {DOWNLOADED: "Downloaded",
SNATCHED: "Snatched",
SNATCHED_PROPER: "Snatched (Proper)",
FAILED: "Failed",
SNATCHED_BEST: "Snatched (Best)"}
@staticmethod
def _getStatusStrings(status):
toReturn = {}
for x in Quality.qualityStrings.keys():
toReturn[Quality.compositeStatus(status, x)] = Quality.statusPrefixes[status] + " (" + \
Quality.qualityStrings[x] + ")"
return toReturn
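    # A combined quality setting stores the "any" qualities in the low 16 bits
    # and the "best" qualities shifted into the high 16 bits of one integer.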
@staticmethod
def combineQualities(anyQualities, bestQualities):
anyQuality = 0
bestQuality = 0
if anyQualities:
anyQuality = reduce(operator.or_, anyQualities)
if bestQualities:
bestQuality = reduce(operator.or_, bestQualities)
return anyQuality | (bestQuality << 16)
@staticmethod
def splitQuality(quality):
anyQualities = []
bestQualities = []
for curQual in Quality.qualityStrings.keys():
if curQual & quality:
anyQualities.append(curQual)
if curQual << 16 & quality:
bestQualities.append(curQual)
return (sorted(anyQualities), sorted(bestQualities))
@staticmethod
def nameQuality(name, anime=False):
"""
        Return the quality of an episode file renamed by SickRage.
        If no quality can be determined this way, fall back to the sceneQuality() regexes.
"""
name = os.path.basename(name)
# if we have our exact text then assume we put it there
for x in sorted(Quality.qualityStrings.keys(), reverse=True):
if x == Quality.UNKNOWN:
continue
            if x == Quality.NONE:  # Last chance
return Quality.sceneQuality(name, anime)
regex = '\W' + Quality.qualityStrings[x].replace(' ', '\W') + '\W'
regex_match = re.search(regex, name, re.I)
if regex_match:
return x
@staticmethod
def sceneQuality(name, anime=False):
"""
        Return the quality of a scene-named episode file.
"""
if not name:
return Quality.UNKNOWN
name = os.path.basename(name)
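        # Helper: run every regex in the given list against the file name and
        # combine the match results with func (any() or all()).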
        checkName = lambda patterns, func: func([re.search(x, name, re.I) for x in patterns])
if anime:
dvdOptions = checkName(["dvd", "dvdrip"], any)
blueRayOptions = checkName(["bluray", "blu-ray", "BD"], any)
            sdOptions = checkName(["360p", "480p", "848x480", "XviD", "hdtv"], any)
hdOptions = checkName(["720p", "1280x720", "960x720"], any)
fullHD = checkName(["1080p", "1920x1080"], any)
if sdOptions and not blueRayOptions and not dvdOptions:
return Quality.SDTV
elif dvdOptions:
return Quality.SDDVD
elif hdOptions and not blueRayOptions and not fullHD:
return Quality.HDTV
elif fullHD and not blueRayOptions and not hdOptions:
return Quality.FULLHDTV
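            # NOTE: this condition is identical to the HDTV branch above, so the
            # HDWEBDL return below is never reached for anime names.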
elif hdOptions and not blueRayOptions and not fullHD:
return Quality.HDWEBDL
elif blueRayOptions and hdOptions and not fullHD:
return Quality.HDBLURAY
elif blueRayOptions and fullHD and not hdOptions:
return Quality.FULLHDBLURAY
else:
return Quality.UNKNOWN
if (checkName(["(pdtv|hdtv|dsr|tvrip).(xvid|x264|h.?264)"], all) or checkName(["(pdtv|hdtv|dsr|tvrip)"], all)) and not checkName(["(720|1080)[pi]"], all) and\
not checkName(["hr.ws.pdtv.x264"], any):
return Quality.SDTV
elif checkName(["web.dl|webrip", "xvid|x264|h.?264"], all) and not checkName(["(720|1080)[pi]"], all):
return Quality.SDTV
elif checkName(["(dvdrip|b[r|d]rip)(.ws)?.(xvid|divx|x264)"], any) and not checkName(["(720|1080)[pi]"], all):
return Quality.SDDVD
elif checkName(["720p", "hdtv", "x264"], all) or checkName(["hr.ws.pdtv.x264"], any) and not checkName(
["(1080)[pi]"], all):
return Quality.HDTV
elif checkName(["720p|1080i", "hdtv", "mpeg-?2"], all) or checkName(["1080[pi].hdtv", "h.?264"], all):
return Quality.RAWHDTV
elif checkName(["1080p", "hdtv", "x264"], all):
return Quality.FULLHDTV
elif checkName(["720p", "web.dl|webrip"], all) or checkName(["720p", "itunes", "h.?264"], all):
return Quality.HDWEBDL
elif checkName(["1080p", "web.dl|webrip"], all) or checkName(["1080p", "itunes", "h.?264"], all):
return Quality.FULLHDWEBDL
elif checkName(["720p", "bluray|hddvd|b[r|d]rip", "x264"], all):
return Quality.HDBLURAY
elif checkName(["1080p", "bluray|hddvd|b[r|d]rip", "x264"], all):
return Quality.FULLHDBLURAY
else:
return Quality.UNKNOWN
@staticmethod
def assumeQuality(name):
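        """Guess the quality from the file extension alone: avi/mp4 -> SDTV, ts -> RAWHDTV, else UNKNOWN."""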
if name.lower().endswith((".avi", ".mp4")):
return Quality.SDTV
elif name.lower().endswith(".ts"):
return Quality.RAWHDTV
else:
return Quality.UNKNOWN
@staticmethod
def compositeStatus(status, quality):
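        """Encode an episode status and a quality into a single int (status + 100 * quality)."""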
return status + 100 * quality
@staticmethod
def qualityDownloaded(status):
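        """Recover the quality from a composite DOWNLOADED status code."""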
return (status - DOWNLOADED) / 100
@staticmethod
def splitCompositeStatus(status):
"""Returns a tuple containing (status, quality)"""
if status == UNKNOWN:
return (UNKNOWN, Quality.UNKNOWN)
for x in sorted(Quality.qualityStrings.keys(), reverse=True):
if status > x * 100:
return (status - x * 100, x)
return (status, Quality.NONE)
@staticmethod
def statusFromName(name, assume=True, anime=False):
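        """Return the composite DOWNLOADED status for a file name, optionally assuming a quality from its extension."""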
quality = Quality.nameQuality(name, anime)
if assume and quality == Quality.UNKNOWN:
quality = Quality.assumeQuality(name)
return Quality.compositeStatus(DOWNLOADED, quality)
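    # Placeholders; populated just below (at module level) with the list of
    # composite status codes for every known quality.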
DOWNLOADED = None
SNATCHED = None
SNATCHED_PROPER = None
FAILED = None
SNATCHED_BEST = None
Quality.DOWNLOADED = [Quality.compositeStatus(DOWNLOADED, x) for x in Quality.qualityStrings.keys()]
Quality.SNATCHED = [Quality.compositeStatus(SNATCHED, x) for x in Quality.qualityStrings.keys()]
Quality.SNATCHED_PROPER = [Quality.compositeStatus(SNATCHED_PROPER, x) for x in Quality.qualityStrings.keys()]
Quality.FAILED = [Quality.compositeStatus(FAILED, x) for x in Quality.qualityStrings.keys()]
Quality.SNATCHED_BEST = [Quality.compositeStatus(SNATCHED_BEST, x) for x in Quality.qualityStrings.keys()]
SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], [])
HD = Quality.combineQualities(
[Quality.HDTV, Quality.FULLHDTV, Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY],
[]) # HD720p + HD1080p
HD720p = Quality.combineQualities([Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], [])
HD1080p = Quality.combineQualities([Quality.FULLHDTV, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY], [])
ANY = Quality.combineQualities(
[Quality.SDTV, Quality.SDDVD, Quality.HDTV, Quality.FULLHDTV, Quality.HDWEBDL, Quality.FULLHDWEBDL,
Quality.HDBLURAY, Quality.FULLHDBLURAY, Quality.UNKNOWN], []) # SD + HD
# legacy template, can't remove due to reference in mainDB upgrade?
BEST = Quality.combineQualities([Quality.SDTV, Quality.HDTV, Quality.HDWEBDL], [Quality.HDTV])
qualityPresets = (SD, HD, HD720p, HD1080p, ANY)
qualityPresetStrings = {SD: "SD",
HD: "HD",
HD720p: "HD720p",
HD1080p: "HD1080p",
ANY: "Any"}
class StatusStrings:
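    """Dict-like helper that renders plain and composite episode status codes as human-readable strings."""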
def __init__(self):
self.statusStrings = {UNKNOWN: "Unknown",
UNAIRED: "Unaired",
SNATCHED: "Snatched",
DOWNLOADED: "Downloaded",
SKIPPED: "Skipped",
SNATCHED_PROPER: "Snatched (Proper)",
WANTED: "Wanted",
ARCHIVED: "Archived",
IGNORED: "Ignored",
SUBTITLED: "Subtitled",
FAILED: "Failed",
SNATCHED_BEST: "Snatched (Best)"}
def __getitem__(self, name):
if name in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST:
status, quality = Quality.splitCompositeStatus(name)
if quality == Quality.NONE:
return self.statusStrings[status]
else:
return self.statusStrings[status] + " (" + Quality.qualityStrings[quality] + ")"
else:
            return self.statusStrings[name] if name in self.statusStrings else ''
def has_key(self, name):
return name in self.statusStrings or name in Quality.DOWNLOADED or name in Quality.SNATCHED or name in Quality.SNATCHED_PROPER or name in Quality.SNATCHED_BEST
statusStrings = StatusStrings()
class Overview:
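    """Coarse per-episode categories, keyed to the short labels in overviewStrings below."""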
UNAIRED = UNAIRED # 1
QUAL = 2
WANTED = WANTED # 3
GOOD = 4
SKIPPED = SKIPPED # 5
    # For both snatched statuses. Note: the module-level SNATCHED (2) would collide with QUAL,
    # so the snatched overviews reuse SNATCHED_BEST's value instead.
    SNATCHED = SNATCHED_PROPER = SNATCHED_BEST  # 12
overviewStrings = {SKIPPED: "skipped",
WANTED: "wanted",
QUAL: "qual",
GOOD: "good",
UNAIRED: "unaired",
SNATCHED: "snatched"}
# Get our xml namespaces correct for lxml
XML_NSMAP = {'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsd': 'http://www.w3.org/2001/XMLSchema'}
countryList = {'Australia': 'AU',
'Canada': 'CA',
'USA': 'US'
}
showLanguages = {'en':'english',
'fr':'french',
'':'unknown'
}
languageShortCode = {'english':'en',
'french':'fr'
}
| gpl-3.0 | 543,687,053,433,685,900 | 38.03794 | 202 | 0.587365 | false |