# mgracer48/panda3d: direct/src/gui/DirectWaitBar.py
"""Undocumented Module"""
__all__ = ['DirectWaitBar']
from panda3d.core import *
import DirectGuiGlobals as DGG
from DirectFrame import *
import types
"""
import DirectWaitBar
d = DirectWaitBar(borderWidth=(0, 0))
"""
class DirectWaitBar(DirectFrame):
""" DirectWaitBar - A DirectWidget that shows progress completed
towards a task. """
def __init__(self, parent = None, **kw):
# Inherits from DirectFrame
# A Direct Frame can have:
# - A background texture (pass in path to image, or Texture Card)
# - A midground geometry item (pass in geometry)
# - A foreground text Node (pass in text string or Onscreen Text)
optiondefs = (
# Define type of DirectGuiWidget
('pgFunc', PGWaitBar, None),
('frameSize', (-1, 1, -0.08, 0.08), None),
('borderWidth', (0, 0), None),
('range', 100, self.setRange),
('value', 0, self.setValue),
('barBorderWidth', (0, 0), self.setBarBorderWidth),
('barColor', (1, 0, 0, 1), self.setBarColor),
('barTexture', None, self.setBarTexture),
('barRelief', DGG.FLAT, self.setBarRelief),
('sortOrder', NO_FADE_SORT_INDEX, None),
)
if 'text' in kw:
textoptiondefs = (
('text_pos', (0, -0.025), None),
('text_scale', 0.1, None)
)
else:
textoptiondefs = ()
# Merge keyword options with default options
self.defineoptions(kw, optiondefs + textoptiondefs)
# Initialize superclasses
DirectFrame.__init__(self, parent)
self.barStyle = PGFrameStyle()
# Call option initialization functions
self.initialiseoptions(DirectWaitBar)
self.updateBarStyle()
def destroy(self):
self.barStyle = None
DirectFrame.destroy(self)
def setRange(self):
"""Updates the bar range which you can set using bar['range'].
This is the value at which the WaitBar indicates 100%."""
self.guiItem.setRange(self['range'])
def setValue(self):
"""Updates the bar value which you can set using bar['value'].
The value should range between 0 and bar['range']."""
self.guiItem.setValue(self['value'])
def getPercent(self):
"""Returns the percentage complete."""
return self.guiItem.getPercent()
def updateBarStyle(self):
if not self.fInit:
self.guiItem.setBarStyle(self.barStyle)
def setBarRelief(self):
"""Updates the bar relief, which you can set using bar['barRelief']."""
self.barStyle.setType(self['barRelief'])
self.updateBarStyle()
def setBarBorderWidth(self):
"""Updates the bar's border width, which you can set using bar['barBorderWidth']."""
self.barStyle.setWidth(*self['barBorderWidth'])
self.updateBarStyle()
def setBarColor(self):
"""Updates the bar color, which you can set using bar['barColor']."""
color = self['barColor']
self.barStyle.setColor(color[0], color[1], color[2], color[3])
self.updateBarStyle()
def setBarTexture(self):
"""Updates the bar texture, which you can set using bar['barTexture']."""
# this must be a single texture (or a string).
texture = self['barTexture']
if isinstance(texture, types.StringTypes):
texture = loader.loadTexture(texture)
if texture:
self.barStyle.setTexture(texture)
else:
self.barStyle.clearTexture()
self.updateBarStyle()
def update(self, value):
"""Updates the bar with the given value and renders a frame."""
self['value'] = value
# Render a frame out-of-sync with the igLoop to update the
# window right now. This allows the wait bar to be updated
# even though we are not normally rendering frames.
base.graphicsEngine.renderFrame()
def finish(self, N = 10):
"""Fill the bar in N frames. This call is blocking."""
remaining = self['range'] - self['value']
if remaining:
step = max(1, int(remaining / N))
count = self['value']
while count != self['range']:
count += step
if count > self['range']:
count = self['range']
self.update(count)
#!/usr/bin/python
# ptitdoc/Archive-qubes-core: dom0/qvm-core/qubesutils.py
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2011 Marek Marczykowski <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
from qubes import QubesVm,QubesException,QubesVmCollection
from qubes import QubesVmClasses
from qubes import xs, xl_ctx, qubes_guid_path, qubes_clipd_path, qrexec_client_path
from qubes import qubes_store_filename, qubes_base_dir
from qubes import qubes_servicevms_dir, qubes_templates_dir, qubes_appvms_dir
import sys
import os
import subprocess
import re
import time
import grp,pwd
from datetime import datetime
from qmemman_client import QMemmanClient
import xen.lowlevel.xc
import xen.lowlevel.xl
import xen.lowlevel.xs
def mbytes_to_kmg(size):
if size > 1024:
return "%d GiB" % (size/1024)
else:
return "%d MiB" % size
def kbytes_to_kmg(size):
if size > 1024:
return mbytes_to_kmg(size/1024)
else:
return "%d KiB" % size
def bytes_to_kmg(size):
if size > 1024:
return kbytes_to_kmg(size/1024)
else:
return "%d B" % size
def size_to_human (size):
"""Humane readable size, with 1/10 precission"""
if size < 1024:
return str (size);
elif size < 1024*1024:
return str(round(size/1024.0,1)) + ' KiB'
elif size < 1024*1024*1024:
return str(round(size/(1024.0*1024),1)) + ' MiB'
else:
return str(round(size/(1024.0*1024*1024),1)) + ' GiB'
def parse_size(size):
units = [ ('K', 1024), ('KB', 1024),
('M', 1024*1024), ('MB', 1024*1024),
('G', 1024*1024*1024), ('GB', 1024*1024*1024),
]
size = size.strip().upper()
if size.isdigit():
return int(size)
for unit, multiplier in units:
if size.endswith(unit):
size = size[:-len(unit)].strip()
return int(size)*multiplier
raise QubesException("Invalid size: {0}.".format(size))
def print_stdout(text):
print (text)
def print_stderr(text):
print >> sys.stderr, (text)
###### Block devices ########
def block_devid_to_name(devid):
major = devid / 256
minor = devid % 256
dev_class = ""
if major == 202:
dev_class = "xvd"
elif major == 8:
dev_class = "sd"
else:
raise QubesException("Unknown device class %d" % major)
if minor % 16 == 0:
return "%s%c" % (dev_class, ord('a')+minor/16)
else:
return "%s%c%d" % (dev_class, ord('a')+minor/16, minor%16)
def block_name_to_majorminor(name):
# check if it is already devid
if isinstance(name, int):
return (name / 256, name % 256)
if name.isdigit():
return (int(name) / 256, int(name) % 256)
major = 0
minor = 0
dXpY_style = False
disk = True
if name.startswith("xvd"):
major = 202
elif name.startswith("sd"):
major = 8
elif name.startswith("mmcblk"):
dXpY_style = True
major = 179
elif name.startswith("scd"):
disk = False
major = 11
elif name.startswith("sr"):
disk = False
major = 11
elif name.startswith("loop"):
disk = False
major = 7
elif name.startswith("md"):
disk = False
major = 9
else:
# Unknown device
return (0, 0)
if not dXpY_style:
name_match = re.match(r"^([a-z]+)([a-z])([0-9]*)$", name)
else:
name_match = re.match(r"^([a-z]+)([0-9]*)(?:p([0-9]+))?$", name)
if not name_match:
raise QubesException("Invalid device name: %s" % name)
if disk:
if dXpY_style:
minor = int(name_match.group(2))*8
else:
minor = (ord(name_match.group(2))-ord('a')) * 16
else:
minor = 0
if name_match.group(3):
minor += int(name_match.group(3))
return (major, minor)
def block_name_to_devid(name):
# check if it is already devid
if isinstance(name, int):
return name
if name.isdigit():
return int(name)
(major, minor) = block_name_to_majorminor(name)
return major << 8 | minor
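# Worked example of the devid encoding used above (illustrative only):
#   "xvdi" -> major 202, minor (ord('i') - ord('a')) * 16 = 128,
#   so block_name_to_devid("xvdi") == 202 * 256 + 128 == 51840,
#   and block_devid_to_name(51840) maps back to "xvdi".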
def block_find_unused_frontend(vm = None):
assert vm is not None
assert vm.is_running()
vbd_list = xs.ls('', '/local/domain/%d/device/vbd' % vm.xid)
# xvd* devices
major = 202
# prefer xvdi
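    # minors 128..240 (step 16) are the whole disks xvdi..xvdp; fall back to xvda..xvdh (minors 0..112)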
for minor in range(8*16,254,16)+range(0,8*16,16):
if vbd_list is None or str(major << 8 | minor) not in vbd_list:
return block_devid_to_name(major << 8 | minor)
return None
def block_list(vm = None, system_disks = False):
device_re = re.compile(r"^[a-z0-9]{1,12}$")
# FIXME: any better idea of desc_re?
desc_re = re.compile(r"^.{1,255}$")
mode_re = re.compile(r"^[rw]$")
xs_trans = xs.transaction_start()
vm_list = []
if vm is not None:
if not vm.is_running():
xs.transaction_end(xs_trans)
return []
else:
vm_list = [ str(vm.xid) ]
else:
vm_list = xs.ls(xs_trans, '/local/domain')
devices_list = {}
for xid in vm_list:
vm_name = xs.read(xs_trans, '/local/domain/%s/name' % xid)
vm_devices = xs.ls(xs_trans, '/local/domain/%s/qubes-block-devices' % xid)
if vm_devices is None:
continue
for device in vm_devices:
# Sanitize device name
if not device_re.match(device):
print >> sys.stderr, "Invalid device name in VM '%s'" % vm_name
continue
device_size = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/size' % (xid, device))
device_desc = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/desc' % (xid, device))
device_mode = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/mode' % (xid, device))
if device_size is None or device_desc is None or device_mode is None:
print >> sys.stderr, "Missing field in %s device parameters" % device
continue
if not device_size.isdigit():
print >> sys.stderr, "Invalid %s device size in VM '%s'" % (device, vm_name)
continue
if not desc_re.match(device_desc):
print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (device, vm_name)
continue
if not mode_re.match(device_mode):
print >> sys.stderr, "Invalid %s device mode in VM '%s'" % (device, vm_name)
continue
# Check if we know major number for this device; attach will work without this, but detach and check_attached don't
if block_name_to_majorminor(device) == (0, 0):
print >> sys.stderr, "Unsupported device %s:%s" % (vm_name, device)
continue
if not system_disks:
if xid == '0' and device_desc.startswith(qubes_base_dir):
continue
visible_name = "%s:%s" % (vm_name, device)
devices_list[visible_name] = {"name": visible_name, "xid":int(xid),
"vm": vm_name, "device":device, "size":int(device_size),
"desc":device_desc, "mode":device_mode}
xs.transaction_end(xs_trans)
return devices_list
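# A block_list() entry looks roughly like this (hypothetical values):
#   {"name": "usbvm:sdb1", "xid": 3, "vm": "usbvm", "device": "sdb1",
#    "size": 16000000000, "desc": "Kingston DataTraveler", "mode": "w"}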
def block_check_attached(backend_vm, device, backend_xid = None):
if backend_xid is None:
backend_xid = backend_vm.xid
xs_trans = xs.transaction_start()
vm_list = xs.ls(xs_trans, '/local/domain/%d/backend/vbd' % backend_xid)
if vm_list is None:
xs.transaction_end(xs_trans)
return None
device_majorminor = None
try:
device_majorminor = block_name_to_majorminor(device)
except:
# Unknown devices will be compared directly - perhaps it is a filename?
pass
for vm_xid in vm_list:
for devid in xs.ls(xs_trans, '/local/domain/%d/backend/vbd/%s' % (backend_xid, vm_xid)):
(tmp_major, tmp_minor) = (0, 0)
phys_device = xs.read(xs_trans, '/local/domain/%d/backend/vbd/%s/%s/physical-device' % (backend_xid, vm_xid, devid))
dev_params = xs.read(xs_trans, '/local/domain/%d/backend/vbd/%s/%s/params' % (backend_xid, vm_xid, devid))
if phys_device and phys_device.find(':'):
(tmp_major, tmp_minor) = phys_device.split(":")
tmp_major = int(tmp_major, 16)
tmp_minor = int(tmp_minor, 16)
else:
# perhaps not ready yet - check params
if not dev_params:
# Skip not-phy devices
continue
elif not dev_params.startswith('/dev/'):
# will compare params directly
pass
else:
                    (tmp_major, tmp_minor) = block_name_to_majorminor(dev_params[len('/dev/'):])
if (device_majorminor and (tmp_major, tmp_minor) == device_majorminor) or \
(device_majorminor is None and dev_params == device):
vm_name = xl_ctx.domid_to_name(int(vm_xid))
frontend = block_devid_to_name(int(devid))
xs.transaction_end(xs_trans)
return {"xid":int(vm_xid), "frontend": frontend, "devid": int(devid), "vm": vm_name}
xs.transaction_end(xs_trans)
return None
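# When the device is attached somewhere, block_check_attached() returns a dict such as
# {"xid": 3, "frontend": "xvdi", "devid": 51840, "vm": "personal"} (hypothetical values);
# otherwise it returns None.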
def block_attach(vm, backend_vm, device, frontend=None, mode="w", auto_detach=False, wait=True):
device_attach_check(vm, backend_vm, device, frontend)
do_block_attach(vm, backend_vm, device, frontend, mode, auto_detach, wait)
def device_attach_check(vm, backend_vm, device, frontend):
""" Checks all the parameters, dies on errors """
if not vm.is_running():
raise QubesException("VM %s not running" % vm.name)
if not backend_vm.is_running():
raise QubesException("VM %s not running" % backend_vm.name)
def do_block_attach(vm, backend_vm, device, frontend, mode, auto_detach, wait):
if frontend is None:
frontend = block_find_unused_frontend(vm)
if frontend is None:
raise QubesException("No unused frontend found")
else:
# Check if any device attached at this frontend
if xs.read('', '/local/domain/%d/device/vbd/%d/state' % (vm.xid, block_name_to_devid(frontend))) == '4':
raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))
# Check if this device is attached to some domain
attached_vm = block_check_attached(backend_vm, device)
if attached_vm:
if auto_detach:
block_detach(None, attached_vm['devid'], vm_xid=attached_vm['xid'])
else:
raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))
if device.startswith('/'):
backend_dev = 'script:file:' + device
else:
backend_dev = 'phy:/dev/' + device
xl_cmd = [ '/usr/sbin/xl', 'block-attach', vm.name, backend_dev, frontend, mode, str(backend_vm.xid) ]
subprocess.check_call(xl_cmd)
if wait:
be_path = '/local/domain/%d/backend/vbd/%d/%d' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend))
# There is no way to use xenstore watch with a timeout, so must check in a loop
interval = 0.100
# 5sec timeout
timeout = 5/interval
while timeout > 0:
be_state = xs.read('', be_path + '/state')
hotplug_state = xs.read('', be_path + '/hotplug-status')
if be_state is None:
raise QubesException("Backend device disappeared, something weird happened")
elif int(be_state) == 4:
# Ok
return
elif int(be_state) > 4:
# Error
error = xs.read('', '/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
if error is not None:
raise QubesException("Error while connecting block device: " + error)
else:
raise QubesException("Unknown error while connecting block device")
elif hotplug_state == 'error':
hotplug_error = xs.read('', be_path + '/hotplug-error')
if hotplug_error:
raise QubesException("Error while connecting block device: " + hotplug_error)
else:
raise QubesException("Unknown hotplug error while connecting block device")
time.sleep(interval)
timeout -= interval
raise QubesException("Timeout while waiting for block defice connection")
def block_detach(vm, frontend = "xvdi", vm_xid = None):
# Get XID if not provided already
if vm_xid is None:
if not vm.is_running():
raise QubesException("VM %s not running" % vm.name)
# FIXME: potential race
vm_xid = vm.xid
# Check if this device is really connected
if not xs.read('', '/local/domain/%d/device/vbd/%d/state' % (vm_xid, block_name_to_devid(frontend))) == '4':
# Do nothing - device already detached
return
xl_cmd = [ '/usr/sbin/xl', 'block-detach', str(vm_xid), str(frontend)]
subprocess.check_call(xl_cmd)
def block_detach_all(vm, vm_xid = None):
""" Detach all non-system devices"""
# Get XID if not provided already
if vm_xid is None:
if not vm.is_running():
raise QubesException("VM %s not running" % vm.name)
# FIXME: potential race
vm_xid = vm.xid
xs_trans = xs.transaction_start()
devices = xs.ls(xs_trans, '/local/domain/%d/device/vbd' % vm_xid)
if devices is None:
return
devices_to_detach = []
for devid in devices:
# check if this is system disk
be_path = xs.read(xs_trans, '/local/domain/%d/device/vbd/%s/backend' % (vm_xid, devid))
assert be_path is not None
be_params = xs.read(xs_trans, be_path + '/params')
if be_path.startswith('/local/domain/0/') and be_params is not None and be_params.startswith(qubes_base_dir):
# system disk
continue
devices_to_detach.append(devid)
xs.transaction_end(xs_trans)
for devid in devices_to_detach:
xl_cmd = [ '/usr/sbin/xl', 'block-detach', str(vm_xid), devid]
subprocess.check_call(xl_cmd)
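# A minimal usage sketch of the block attach/detach API (illustrative only; the VM
# objects and the "sdb1" device name are hypothetical, and both VMs must be running):
#
#     block_attach(target_vm, backend_vm, "sdb1", mode="r")   # picks a free xvd* frontend
#     ...
#     block_detach(target_vm, "xvdi")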
####### USB devices ######
usb_ver_re = re.compile(r"^(1|2)$")
usb_device_re = re.compile(r"^[0-9]+-[0-9]+(_[0-9]+)?$")
usb_port_re = re.compile(r"^$|^[0-9]+-[0-9]+(\.[0-9]+)?$")
def usb_setup(backend_vm_xid, vm_xid, devid, usb_ver):
"""
Attach frontend to the backend.
backend_vm_xid - id of the backend domain
vm_xid - id of the frontend domain
devid - id of the pvusb controller
"""
num_ports = 8
trans = xs.transaction_start()
be_path = "/local/domain/%d/backend/vusb/%d/%d" % (backend_vm_xid, vm_xid, devid)
fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, devid)
be_perm = [{'dom': backend_vm_xid}, {'dom': vm_xid, 'read': True} ]
fe_perm = [{'dom': vm_xid}, {'dom': backend_vm_xid, 'read': True} ]
# Create directories and set permissions
xs.write(trans, be_path, "")
xs.set_permissions(trans, be_path, be_perm)
xs.write(trans, fe_path, "")
xs.set_permissions(trans, fe_path, fe_perm)
# Write backend information into the location that frontend looks for
xs.write(trans, "%s/backend-id" % fe_path, str(backend_vm_xid))
xs.write(trans, "%s/backend" % fe_path, be_path)
# Write frontend information into the location that backend looks for
xs.write(trans, "%s/frontend-id" % be_path, str(vm_xid))
xs.write(trans, "%s/frontend" % be_path, fe_path)
# Write USB Spec version field.
xs.write(trans, "%s/usb-ver" % be_path, usb_ver)
# Write virtual root hub field.
xs.write(trans, "%s/num-ports" % be_path, str(num_ports))
for port in range(1, num_ports+1):
# Set all port to disconnected state
xs.write(trans, "%s/port/%d" % (be_path, port), "")
# Set state to XenbusStateInitialising
xs.write(trans, "%s/state" % fe_path, "1")
xs.write(trans, "%s/state" % be_path, "1")
xs.write(trans, "%s/online" % be_path, "1")
xs.transaction_end(trans)
def usb_decode_device_from_xs(xs_encoded_device):
""" recover actual device name (xenstore doesn't allow dot in key names, so it was translated to underscore) """
return xs_encoded_device.replace('_', '.')
def usb_encode_device_for_xs(device):
""" encode actual device name (xenstore doesn't allow dot in key names, so translated it into underscore) """
return device.replace('.', '_')
def usb_list():
"""
    Returns a dictionary of USB devices (for PVUSB backends running in all VMs).
    The dictionary is keyed by 'name' (see below); each element is itself a dictionary:
vm = name of the backend domain
xid = xid of the backend domain
device = <frontend device number>-<frontend port number>
name = <name of backend domain>:<frontend device number>-<frontend port number>
desc = description
"""
# FIXME: any better idea of desc_re?
desc_re = re.compile(r"^.{1,255}$")
devices_list = {}
xs_trans = xs.transaction_start()
vm_list = xs.ls(xs_trans, '/local/domain')
for xid in vm_list:
vm_name = xs.read(xs_trans, '/local/domain/%s/name' % xid)
vm_devices = xs.ls(xs_trans, '/local/domain/%s/qubes-usb-devices' % xid)
if vm_devices is None:
continue
# when listing devices in xenstore we get encoded names
for xs_encoded_device in vm_devices:
# Sanitize device id
if not usb_device_re.match(xs_encoded_device):
print >> sys.stderr, "Invalid device id in backend VM '%s'" % vm_name
continue
device = usb_decode_device_from_xs(xs_encoded_device)
device_desc = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/desc' % (xid, xs_encoded_device))
if not desc_re.match(device_desc):
print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (device, vm_name)
continue
visible_name = "%s:%s" % (vm_name, device)
# grab version
usb_ver = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (xid, xs_encoded_device))
if usb_ver is None or not usb_ver_re.match(usb_ver):
print >> sys.stderr, "Invalid %s device USB version in VM '%s'" % (device, vm_name)
continue
devices_list[visible_name] = {"name": visible_name, "xid":int(xid),
"vm": vm_name, "device":device,
"desc":device_desc,
"usb_ver":usb_ver}
xs.transaction_end(xs_trans)
return devices_list
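# A usb_list() entry looks roughly like this (hypothetical values):
#   {"name": "usbvm:2-1", "xid": 3, "vm": "usbvm", "device": "2-1",
#    "desc": "Kingston DataTraveler", "usb_ver": "2"}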
def usb_check_attached(xs_trans, backend_vm, device):
"""
Checks if the given device in the given backend attached to any frontend.
Parameters:
backend_vm - xid of the backend domain
device - device name in the backend domain
Returns None or a dictionary:
vm - the name of the frontend domain
xid - xid of the frontend domain
frontend - frontend device number FIXME
devid - frontend port number FIXME
"""
# sample xs content: /local/domain/0/backend/vusb/4/0/port/1 = "7-5"
attached_dev = None
vms = xs.ls(xs_trans, '/local/domain/%d/backend/vusb' % backend_vm)
if vms is None:
return None
for vm in vms:
if not vm.isdigit():
print >> sys.stderr, "Invalid VM id"
continue
frontend_devs = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s' % (backend_vm, vm))
if frontend_devs is None:
continue
for frontend_dev in frontend_devs:
if not frontend_dev.isdigit():
print >> sys.stderr, "Invalid frontend in VM %s" % vm
continue
ports = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port' % (backend_vm, vm, frontend_dev))
if ports is None:
continue
for port in ports:
# FIXME: refactor, see similar loop in usb_find_unused_frontend(), use usb_list() instead?
if not port.isdigit():
print >> sys.stderr, "Invalid port in VM %s frontend %s" % (vm, frontend)
continue
dev = xs.read(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port/%s' % (backend_vm, vm, frontend_dev, port))
if dev == "":
continue
# Sanitize device id
if not usb_port_re.match(dev):
print >> sys.stderr, "Invalid device id in backend VM %d @ %s/%s/port/%s" % \
(backend_vm, vm, frontend_dev, port)
continue
if dev == device:
frontend = "%s-%s" % (frontend_dev, port)
vm_name = xl_ctx.domid_to_name(int(vm))
if vm_name is None:
# FIXME: should we wipe references to frontends running on nonexistent VMs?
continue
attached_dev = {"xid":int(vm), "frontend": frontend, "devid": device, "vm": vm_name}
break
return attached_dev
#def usb_check_frontend_busy(vm, front_dev, port):
# devport = frontend.split("-")
# if len(devport) != 2:
# raise QubesException("Malformed frontend syntax, must be in device-port format")
# # FIXME:
# # return xs.read('', '/local/domain/%d/device/vusb/%d/state' % (vm.xid, frontend)) == '4'
# return False
def usb_find_unused_frontend(xs_trans, backend_vm_xid, vm_xid, usb_ver):
"""
Find an unused frontend/port to link the given backend with the given frontend.
Creates new frontend if needed.
Returns frontend specification in <device>-<port> format.
"""
# This variable holds an index of last frontend scanned by the loop below.
# If nothing found, this value will be used to derive the index of a new frontend.
last_frontend_dev = -1
frontend_devs = xs.ls(xs_trans, "/local/domain/%d/device/vusb" % vm_xid)
if frontend_devs is not None:
for frontend_dev in frontend_devs:
if not frontend_dev.isdigit():
print >> sys.stderr, "Invalid frontend_dev in VM %d" % vm_xid
continue
frontend_dev = int(frontend_dev)
fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, frontend_dev)
if xs.read(xs_trans, "%s/backend-id" % fe_path) == str(backend_vm_xid):
if xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/usb-ver' % (backend_vm_xid, vm_xid, frontend_dev)) != usb_ver:
last_frontend_dev = frontend_dev
continue
# here: found an existing frontend already connected to right backend using an appropriate USB version
ports = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/port' % (backend_vm_xid, vm_xid, frontend_dev))
if ports is None:
print >> sys.stderr, "No ports in VM %d frontend_dev %d?" % (vm_xid, frontend_dev)
last_frontend_dev = frontend_dev
continue
for port in ports:
# FIXME: refactor, see similar loop in usb_check_attached(), use usb_list() instead?
if not port.isdigit():
print >> sys.stderr, "Invalid port in VM %d frontend_dev %d" % (vm_xid, frontend_dev)
continue
port = int(port)
dev = xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%s/port/%s' % (backend_vm_xid, vm_xid, frontend_dev, port))
# Sanitize device id
if not usb_port_re.match(dev):
print >> sys.stderr, "Invalid device id in backend VM %d @ %d/%d/port/%d" % \
(backend_vm_xid, vm_xid, frontend_dev, port)
continue
if dev == "":
return '%d-%d' % (frontend_dev, port)
last_frontend_dev = frontend_dev
# create a new frontend_dev and link it to the backend
frontend_dev = last_frontend_dev + 1
usb_setup(backend_vm_xid, vm_xid, frontend_dev, usb_ver)
return '%d-%d' % (frontend_dev, 1)
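# The returned frontend spec is "<controller>-<port>", e.g. "0-1" for port 1 on the
# first pvusb controller of the frontend domain (illustrative).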
def usb_attach(vm, backend_vm, device, frontend=None, auto_detach=False, wait=True):
device_attach_check(vm, backend_vm, device, frontend)
xs_trans = xs.transaction_start()
xs_encoded_device = usb_encode_device_for_xs(device)
usb_ver = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (backend_vm.xid, xs_encoded_device))
if usb_ver is None or not usb_ver_re.match(usb_ver):
xs.transaction_end(xs_trans)
raise QubesException("Invalid %s device USB version in VM '%s'" % (device, backend_vm.name))
if frontend is None:
frontend = usb_find_unused_frontend(xs_trans, backend_vm.xid, vm.xid, usb_ver)
else:
# Check if any device attached at this frontend
#if usb_check_frontend_busy(vm, frontend):
# raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))
xs.transaction_end(xs_trans)
raise NotImplementedError("Explicit USB frontend specification is not implemented yet")
# Check if this device is attached to some domain
attached_vm = usb_check_attached(xs_trans, backend_vm.xid, device)
xs.transaction_end(xs_trans)
if attached_vm:
if auto_detach:
usb_detach(backend_vm, attached_vm)
else:
raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))
# Run helper script
xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-attach.py', str(vm.xid), device, frontend, str(backend_vm.xid) ]
subprocess.check_call(xl_cmd)
def usb_detach(backend_vm, attachment):
xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-detach.py', str(attachment['xid']), attachment['devid'], attachment['frontend'], str(backend_vm.xid) ]
subprocess.check_call(xl_cmd)
def usb_detach_all(vm):
raise NotImplementedError("Detaching all devices from a given VM is not implemented yet")
####### QubesWatch ######
def only_in_first_list(l1, l2):
ret=[]
for i in l1:
if not i in l2:
ret.append(i)
return ret
class QubesWatch(object):
class WatchType(object):
def __init__(self, fn, param):
self.fn = fn
self.param = param
def __init__(self):
self.xs = xen.lowlevel.xs.xs()
self.watch_tokens_block = {}
self.watch_tokens_vbd = {}
self.block_callback = None
self.domain_callback = None
self.xs.watch('@introduceDomain', QubesWatch.WatchType(self.domain_list_changed, None))
self.xs.watch('@releaseDomain', QubesWatch.WatchType(self.domain_list_changed, None))
def setup_block_watch(self, callback):
old_block_callback = self.block_callback
self.block_callback = callback
if old_block_callback is not None and callback is None:
# remove watches
self.update_watches_vbd([])
self.update_watches_block([])
else:
# possibly add watches
self.domain_list_changed(None)
def setup_domain_watch(self, callback):
self.domain_callback = callback
def get_block_key(self, xid):
return '/local/domain/%s/qubes-block-devices' % xid
def get_vbd_key(self, xid):
return '/local/domain/%s/device/vbd' % xid
def update_watches_block(self, xid_list):
for i in only_in_first_list(xid_list, self.watch_tokens_block.keys()):
#new domain has been created
watch = QubesWatch.WatchType(self.block_callback, i)
self.watch_tokens_block[i] = watch
self.xs.watch(self.get_block_key(i), watch)
for i in only_in_first_list(self.watch_tokens_block.keys(), xid_list):
#domain destroyed
self.xs.unwatch(self.get_block_key(i), self.watch_tokens_block[i])
self.watch_tokens_block.pop(i)
def update_watches_vbd(self, xid_list):
for i in only_in_first_list(xid_list, self.watch_tokens_vbd.keys()):
#new domain has been created
watch = QubesWatch.WatchType(self.block_callback, i)
self.watch_tokens_vbd[i] = watch
self.xs.watch(self.get_vbd_key(i), watch)
for i in only_in_first_list(self.watch_tokens_vbd.keys(), xid_list):
#domain destroyed
self.xs.unwatch(self.get_vbd_key(i), self.watch_tokens_vbd[i])
self.watch_tokens_vbd.pop(i)
def domain_list_changed(self, param):
curr = self.xs.ls('', '/local/domain')
if curr == None:
return
if self.domain_callback:
self.domain_callback()
if self.block_callback:
self.update_watches_block(curr)
self.update_watches_vbd(curr)
def watch_single(self):
result = self.xs.read_watch()
token = result[1]
token.fn(token.param)
def watch_loop(self):
while True:
self.watch_single()
######## Backups #########
def get_disk_usage(file_or_dir):
if not os.path.exists(file_or_dir):
return 0
p = subprocess.Popen (["du", "-s", "--block-size=1", file_or_dir],
stdout=subprocess.PIPE)
result = p.communicate()
m = re.match(r"^(\d+)\s.*", result[0])
sz = int(m.group(1)) if m is not None else 0
return sz
def file_to_backup (file_path, sz = None):
if sz is None:
sz = os.path.getsize (qubes_store_filename)
abs_file_path = os.path.abspath (file_path)
abs_base_dir = os.path.abspath (qubes_base_dir) + '/'
abs_file_dir = os.path.dirname (abs_file_path) + '/'
(nothing, dir, subdir) = abs_file_dir.partition (abs_base_dir)
assert nothing == ""
assert dir == abs_base_dir
return [ { "path" : file_path, "size": sz, "subdir": subdir} ]
def backup_prepare(base_backup_dir, vms_list = None, exclude_list = [], print_callback = print_stdout):
"""If vms = None, include all (sensible) VMs; exclude_list is always applied"""
'''
if not os.path.exists (base_backup_dir):
raise QubesException("The target directory doesn't exist!")
'''
files_to_backup = file_to_backup (qubes_store_filename)
if exclude_list is None:
exclude_list = []
qvm_collection = None
if vms_list is None:
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
qvm_collection.load()
all_vms = [vm for vm in qvm_collection.values()]
appvms_to_backup = [vm for vm in all_vms if vm.is_appvm() and not vm.internal]
netvms_to_backup = [vm for vm in all_vms if vm.is_netvm() and not vm.qid == 0]
template_vms_worth_backingup = [vm for vm in all_vms if (vm.is_template() and not vm.installed_by_rpm)]
vms_list = appvms_to_backup + netvms_to_backup + template_vms_worth_backingup
vms_for_backup = vms_list
# Apply exclude list
if exclude_list:
vms_for_backup = [vm for vm in vms_list if vm.name not in exclude_list]
no_vms = len (vms_for_backup)
there_are_running_vms = False
fields_to_display = [
{ "name": "VM", "width": 16},
{ "name": "type","width": 12 },
{ "name": "size", "width": 12}
]
# Display the header
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(f["width"] + 1)
s += fmt.format(f["name"])
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
for vm in vms_for_backup:
if vm.is_template():
# handle templates later
continue
if vm.private_img is not None:
vm_sz = vm.get_disk_usage (vm.private_img)
files_to_backup += file_to_backup(vm.private_img, vm_sz )
if vm.is_appvm():
files_to_backup += file_to_backup(vm.icon_path)
if vm.updateable:
if os.path.exists(vm.dir_path + "/apps.templates"):
# template
files_to_backup += file_to_backup(vm.dir_path + "/apps.templates")
else:
# standaloneVM
files_to_backup += file_to_backup(vm.dir_path + "/apps")
if os.path.exists(vm.dir_path + "/kernels"):
files_to_backup += file_to_backup(vm.dir_path + "/kernels")
if os.path.exists (vm.firewall_conf):
files_to_backup += file_to_backup(vm.firewall_conf)
if os.path.exists(vm.dir_path + '/whitelisted-appmenus.list'):
files_to_backup += file_to_backup(vm.dir_path + '/whitelisted-appmenus.list')
if vm.updateable:
sz = vm.get_disk_usage(vm.root_img)
files_to_backup += file_to_backup(vm.root_img, sz)
vm_sz += sz
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format(vm.name)
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
if vm.is_netvm():
s += fmt.format("NetVM" + (" + Sys" if vm.updateable else ""))
else:
s += fmt.format("AppVM" + (" + Sys" if vm.updateable else ""))
fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(vm_sz))
if vm.is_running():
s += " <-- The VM is running, please shut it down before proceeding with the backup!"
there_are_running_vms = True
print_callback(s)
for vm in vms_for_backup:
if not vm.is_template():
# already handled
continue
vm_sz = vm.get_disk_utilization()
files_to_backup += file_to_backup (vm.dir_path, vm_sz)
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format(vm.name)
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
s += fmt.format("Template VM")
fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(vm_sz))
if vm.is_running():
s += " <-- The VM is running, please shut it down before proceeding with the backup!"
there_are_running_vms = True
print_callback(s)
# Initialize backup flag on all VMs
for vm in qvm_collection.values():
vm.backup_content = False
if vm in vms_for_backup:
vm.backup_content = True
vm.backup_size = vm.get_disk_utilization()
vm.backup_path = vm.dir_path.split(os.path.normpath(qubes_base_dir)+"/")[1]
qvm_collection.save()
# FIXME: should be after backup completed
qvm_collection.unlock_db()
# Dom0 user home
if not 'dom0' in exclude_list:
local_user = grp.getgrnam('qubes').gr_mem[0]
home_dir = pwd.getpwnam(local_user).pw_dir
# Home dir should have only user-owned files, so fix it now to prevent
        # permissions problems - some root-owned files can be left after
# 'sudo bash' and similar commands
subprocess.check_call(['sudo', 'chown', '-R', local_user, home_dir])
home_sz = get_disk_usage(home_dir)
home_to_backup = [ { "path" : home_dir, "size": home_sz, "subdir": 'dom0-home'} ]
files_to_backup += home_to_backup
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format('Dom0')
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
s += fmt.format("User home")
fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(home_sz))
print_callback(s)
total_backup_sz = 0
for file in files_to_backup:
total_backup_sz += file["size"]
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format("Total size:")
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1 + 2 + fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(total_backup_sz))
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
'''
stat = os.statvfs(base_backup_dir)
backup_fs_free_sz = stat.f_bsize * stat.f_bavail
print_callback("")
if (total_backup_sz > backup_fs_free_sz):
raise QubesException("Not enough space available on the backup filesystem!")
if (there_are_running_vms):
raise QubesException("Please shutdown all VMs before proceeding.")
print_callback("-> Available space: {0}".format(size_to_human(backup_fs_free_sz)))
'''
return files_to_backup
def backup_do(base_backup_dir, files_to_backup, progress_callback = None):
total_backup_sz = 0
for file in files_to_backup:
total_backup_sz += file["size"]
backup_dir = base_backup_dir + "/qubes-{0}".format (time.strftime("%Y-%m-%d-%H%M%S"))
if os.path.exists (backup_dir):
raise QubesException("ERROR: the path {0} already exists?!".format(backup_dir))
os.mkdir (backup_dir)
if not os.path.exists (backup_dir):
raise QubesException("Strange: couldn't create backup dir: {0}?!".format(backup_dir))
bytes_backedup = 0
for file in files_to_backup:
# We prefer to use Linux's cp, because it nicely handles sparse files
progress = bytes_backedup * 100 / total_backup_sz
progress_callback(progress)
dest_dir = backup_dir + '/' + file["subdir"]
if file["subdir"] != "":
retcode = subprocess.call (["mkdir", "-p", dest_dir])
if retcode != 0:
raise QubesException("Cannot create directory: {0}?!".format(dest_dir))
retcode = subprocess.call (["cp", "-rp", file["path"], dest_dir])
if retcode != 0:
raise QubesException("Error while copying file {0} to {1}".format(file["path"], dest_dir))
bytes_backedup += file["size"]
progress = bytes_backedup * 100 / total_backup_sz
progress_callback(progress)
def backup_do_copy(base_backup_dir, files_to_backup, passphrase, progress_callback = None, encrypt=False, appvm=None):
total_backup_sz = 0
for file in files_to_backup:
total_backup_sz += file["size"]
vmproc = None
if appvm != None:
# Prepare the backup target (Qubes service call)
backup_target = "QUBESRPC qubes.Backup none"
# does the vm exist?
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
vm = qvm_collection.get_vm_by_name(appvm)
if vm is None or vm.qid not in qvm_collection:
raise QubesException("VM {0} does not exist".format(appvm))
qvm_collection.unlock_db()
# If APPVM, STDOUT is a PIPE
vmproc = vm.run(command = backup_target, passio_popen = True)
vmproc.stdin.write(base_backup_dir.replace("\r","").replace("\n","")+"\n")
backup_stdout = vmproc.stdin
else:
# Prepare the backup target (local file)
backup_target = base_backup_dir + "/qubes-{0}".format (time.strftime("%Y-%m-%d-%H%M%S"))
# Create the target directory
if not os.path.exists (base_backup_dir):
raise QubesException("ERROR: the backup directory {0} does not exists".format(base_backup_dir))
# If not APPVM, STDOUT is a local file
backup_stdout = open(backup_target,'wb')
blocks_backedup = 0
    progress = blocks_backedup * 100 / total_backup_sz
progress_callback(progress)
import tempfile
feedback_file = tempfile.NamedTemporaryFile()
backup_tmpdir = tempfile.mkdtemp(prefix="/var/tmp/backup_")
    # Tar with tapelength does not deal well with stdout (it closes stdout between two tapes)
# For this reason, we will use named pipes instead
print "Working in",backup_tmpdir
backup_pipe = os.path.join(backup_tmpdir,"backup_pipe")
print "Creating pipe in:",backup_pipe
    os.mkfifo(backup_pipe)
print "Will backup:",files_to_backup
# Setup worker to send encrypted data chunks to the backup_target
from multiprocessing import Queue,Process
class Send_Worker(Process):
def __init__(self,queue,base_dir,backup_stdout):
super(Send_Worker, self).__init__()
self.queue = queue
self.base_dir = base_dir
self.backup_stdout = backup_stdout
def run(self):
print "Started sending thread"
print "Moving to temporary dir",self.base_dir
os.chdir(self.base_dir)
for filename in iter(self.queue.get,None):
if filename == "FINISHED":
break
print "Sending file",filename
                # This tar, used for sending data out, needs to be as simple and featureless as possible. It will not be verified before untarring.
tar_final_cmd = ["tar", "-cO", "--posix", "-C", self.base_dir, filename]
final_proc = subprocess.Popen (tar_final_cmd, stdin=subprocess.PIPE, stdout=self.backup_stdout)
final_proc.wait()
# Delete the file as we don't need it anymore
print "Removing file",filename
os.remove(filename)
print "Finished sending thread"
global blocks_backedup
blocks_backedup = 0
def compute_progress(new_size, total_backup_sz):
global blocks_backedup
blocks_backedup += new_size
progress = blocks_backedup / float(total_backup_sz)
progress_callback(int(round(progress*100,2)))
to_send = Queue()
send_proc = Send_Worker(to_send, backup_tmpdir, backup_stdout)
send_proc.start()
for filename in files_to_backup:
print "Backing up",filename
backup_tempfile = os.path.join(backup_tmpdir,filename["path"].split(os.path.normpath(qubes_base_dir)+"/")[1])
print "Using temporary location:",backup_tempfile
# Ensure the temporary directory exists
if not os.path.isdir(os.path.dirname(backup_tempfile)):
os.makedirs(os.path.dirname(backup_tempfile))
        # The first tar command can use any complex features we want. Files will be verified before untarring them.
tar_cmdline = ["tar", "-Pc", "-f", backup_pipe,'--sparse','--tape-length',str(1000000),'-C',qubes_base_dir,
filename["path"].split(os.path.normpath(qubes_base_dir)+"/")[1]
]
print " ".join(tar_cmdline)
# Tips: Popen(bufsize=0)
# Pipe: tar-sparse | encryptor [| hmac] | tar | backup_target
# Pipe: tar-sparse [| hmac] | tar | backup_target
tar_sparse = subprocess.Popen (tar_cmdline,stdin=subprocess.PIPE)
# Wait for compressor (tar) process to finish or for any error of other subprocesses
i=0
run_error = "paused"
running = []
while run_error == "paused":
pipe = open(backup_pipe,'rb')
# Start HMAC
hmac = subprocess.Popen (["openssl", "dgst", "-hmac", passphrase], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Prepare a first chunk
chunkfile = backup_tempfile + "." + "%03d" % i
i += 1
chunkfile_p = open(chunkfile,'wb')
if encrypt:
# Start encrypt
# If no cipher is provided, the data is forwarded unencrypted !!!
# Also note that the
encryptor = subprocess.Popen (["openssl", "enc", "-e", "-aes-256-cbc", "-pass", "pass:"+passphrase], stdin=pipe, stdout=subprocess.PIPE)
run_error = wait_backup_feedback(compute_progress, encryptor.stdout, encryptor, chunkfile_p, total_backup_sz, hmac=hmac, vmproc=vmproc, addproc=tar_sparse)
else:
run_error = wait_backup_feedback(compute_progress, pipe, None, chunkfile_p, total_backup_sz, hmac=hmac, vmproc=vmproc, addproc=tar_sparse)
chunkfile_p.close()
print "Wait_backup_feedback returned:",run_error
if len(run_error) > 0:
send_proc.terminate()
raise QubesException("Failed to perform backup: error with "+run_error)
# Send the chunk to the backup target
to_send.put(chunkfile.split(os.path.normpath(backup_tmpdir)+"/")[1])
# Close HMAC
hmac.stdin.close()
hmac.wait()
print "HMAC proc return code:",hmac.poll()
# Write HMAC data next to the chunk file
hmac_data = hmac.stdout.read()
print "Writing hmac to",chunkfile+".hmac"
hmac_file = open(chunkfile+".hmac",'w')
hmac_file.write(hmac_data)
hmac_file.flush()
hmac_file.close()
# Send the HMAC to the backup target
to_send.put(chunkfile.split(os.path.normpath(backup_tmpdir)+"/")[1]+".hmac")
if tar_sparse.poll() == None:
# Release the next chunk
print "Release next chunk for process:",tar_sparse.poll()
#tar_sparse.stdout = subprocess.PIPE
tar_sparse.stdin.write("\n")
run_error="paused"
else:
print "Finished tar sparse with error",tar_sparse.poll()
pipe.close()
# Close the backup target and wait for it to finish
#backup_stdout.close()
to_send.put("FINISHED")
send_proc.join()
if send_proc.exitcode != 0:
raise QubesException("Failed to send backup: error in the sending process")
if vmproc:
print "VMProc1 proc return code:",vmproc.poll()
print "Sparse1 proc return code:",tar_sparse.poll()
vmproc.stdin.close()
'''
' Wait for a backup chunk to finish
' - Monitor all the processes (streamproc, hmac, vmproc, addproc) for errors
' - Copy stdout of streamproc to backup_target and hmac stdin if available
' - Compute progress based on total_backup_sz and send progress to the progress_callback function
' - Returns if:
'   - one of the monitored processes errors out (streamproc, hmac, vmproc, addproc), along with the process that failed
'   - all of the monitored processes except vmproc have finished successfully (vmproc termination is controlled by the python script)
'   - streamproc no longer delivers any data (returns with the error "paused")
'''
def wait_backup_feedback(progress_callback, in_stream, streamproc, backup_target, total_backup_sz, hmac=None, vmproc=None, addproc=None, remove_trailing_bytes=0):
buffer_size = 4096
run_error = None
run_count = 1
blocks_backedup = 0
while run_count > 0 and run_error == None:
buffer = in_stream.read(buffer_size)
progress_callback(len(buffer),total_backup_sz)
run_count = 0
if hmac:
retcode=hmac.poll()
if retcode != None:
if retcode != 0:
run_error = "hmac"
else:
run_count += 1
if addproc:
retcode=addproc.poll()
#print "Tar proc status:",retcode
if retcode != None:
if retcode != 0:
run_error = "addproc"
else:
run_count += 1
if vmproc:
retcode = vmproc.poll()
if retcode != None:
if retcode != 0:
run_error = "VM"
print vmproc.stdout.read()
else:
# VM should run until the end
pass
if streamproc:
retcode=streamproc.poll()
if retcode != None:
if retcode != 0:
run_error = "streamproc"
elif retcode == 0 and len(buffer) <= 0:
return ""
else:
#print "INFO: last packet"
#if remove_trailing_bytes > 0:
# print buffer.encode("hex")
# buffer = buffer[:-remove_trailing_bytes]
# print buffer.encode("hex")
backup_target.write(buffer)
if hmac:
hmac.stdin.write(buffer)
run_count += 1
else:
#print "Process running:",len(buffer)
# Process still running
backup_target.write(buffer)
if hmac:
hmac.stdin.write(buffer)
run_count += 1
else:
if len(buffer) <= 0:
return ""
else:
backup_target.write(buffer)
if hmac:
hmac.stdin.write(buffer)
return run_error
def restore_vm_dirs (backup_dir, backup_tmpdir, passphrase, vms_dirs, vms, vms_size, print_callback=None, error_callback=None, progress_callback=None, encrypted=False, appvm=None):
# Setup worker to extract encrypted data chunks to the restore dirs
from multiprocessing import Queue,Process
class Extract_Worker(Process):
def __init__(self,queue,base_dir,passphrase,encrypted,total_size,print_callback,error_callback,progress_callback,vmproc=None):
super(Extract_Worker, self).__init__()
self.queue = queue
self.base_dir = base_dir
self.passphrase = passphrase
self.encrypted = encrypted
self.total_size = total_size
self.blocks_backedup = 0
self.tar2_command = None
self.print_callback = print_callback
self.error_callback = error_callback
self.progress_callback = progress_callback
self.vmproc = vmproc
self.restore_pipe = os.path.join(self.base_dir,"restore_pipe")
print "Creating pipe in:",self.restore_pipe
            os.mkfifo(self.restore_pipe)
def compute_progress(self, new_size, total_size):
self.blocks_backedup += new_size
progress = self.blocks_backedup / float(self.total_size)
progress = int(round(progress*100,2))
self.progress_callback(progress)
def run(self):
self.print_callback("Started sending thread")
self.print_callback("Moving to dir "+self.base_dir)
os.chdir(self.base_dir)
for filename in iter(self.queue.get,None):
if filename == "FINISHED":
break
self.print_callback("Extracting file "+filename+" to "+qubes_base_dir)
if self.tar2_command == None:
                    # FIXME: Make the extraction safer by avoiding erasing other VMs:
                    # - extract directly to the target directory (based on the VM name and by using --strip=2).
                    # - ensure that leading slashes are ignored when extracting (can also be obtained by running with --strip ?)
self.tar2_command = ['tar', '--tape-length','1000000', '-C', qubes_base_dir, '-xvf', self.restore_pipe]
self.print_callback("Running command "+str(self.tar2_command))
self.tar2_command = subprocess.Popen(self.tar2_command,stdin=subprocess.PIPE)
pipe = open(self.restore_pipe,'r+b')
if self.encrypted:
# Start decrypt
encryptor = subprocess.Popen (["openssl", "enc", "-d", "-aes-256-cbc", "-pass", "pass:"+passphrase], stdin=open(filename,'rb'), stdout=subprocess.PIPE)
# progress_callback, in_stream, streamproc, backup_target, total_backup_sz, hmac=None, vmproc=None, addproc=None, remove_trailing_bytes=0):
run_error = wait_backup_feedback(self.compute_progress, encryptor.stdout, encryptor, pipe, self.total_size, hmac=None, vmproc=self.vmproc, addproc=self.tar2_command)
#print "End wait_backup_feedback",run_error,self.tar2_command.poll(),encryptor.poll()
else:
run_error = wait_backup_feedback(self.compute_progress, open(filename,"rb"), None, pipe, self.total_size, hmac=None, vmproc=self.vmproc, addproc=self.tar2_command)
pipe.close()
self.print_callback("Run error:"+run_error)
self.print_callback(str(self.tar2_command.poll()))
if self.tar2_command.poll() != None:
if self.tar2_command.poll() != 0:
raise QubesException("ERROR: unable to extract files for {0}.".format(filename))
else:
# Finished extracting the tar file
self.tar2_command = None
else:
self.print_callback("Releasing next chunck")
self.tar2_command.stdin.write("\n")
# Delete the file as we don't need it anymore
self.print_callback("Removing file "+filename)
os.remove(filename)
self.print_callback("Finished extracting thread")
if progress_callback == None:
def progress_callback(data):
pass
to_extract = Queue()
extract_proc = Extract_Worker(to_extract, backup_tmpdir, passphrase, encrypted, vms_size, print_callback, error_callback, progress_callback)
extract_proc.start()
print_callback("Working in temporary dir:"+backup_tmpdir)
print_callback(str(vms_size)+" bytes to restore")
vmproc = None
if appvm != None:
# Prepare the backup target (Qubes service call)
backup_target = "QUBESRPC qubes.Restore none"
# does the vm exist?
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
vm = qvm_collection.get_vm_by_name(appvm)
if vm is None or vm.qid not in qvm_collection:
raise QubesException("VM {0} does not exist".format(appvm))
qvm_collection.unlock_db()
# If APPVM, STDOUT is a PIPE
vmproc = vm.run(command = backup_target, passio_popen = True)
vmproc.stdin.write(backup_dir.replace("\r","").replace("\n","")+"\n")
backup_stdin = vmproc.stdout
else:
backup_stdin = open(backup_dir,'rb')
# FIXME: Use a safer program such as cpio, modified uncompress.c, or try to extract it from
tar1_command = ['tar', '-i', '-xv', '-C', backup_tmpdir]
tar1_command.extend(vms_dirs)
print_callback("Run command"+str(tar1_command))
command = subprocess.Popen(tar1_command, stdin=backup_stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while command.poll() is None and (vmproc is None or vmproc.poll() is None):
filename = command.stdout.readline().strip(" \t\r\n")
print_callback("Getting new file:"+filename)
hmacfile = command.stdout.readline().strip(" \t\r\n")
print_callback("Getting hmac:"+hmacfile)
print_callback("Verifying file"+filename)
hmac_proc = subprocess.Popen (["openssl", "dgst", "-hmac", passphrase], stdin=open(os.path.join(backup_tmpdir,filename),'rb'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = hmac_proc.communicate()
if len(stderr) > 0:
raise QubesException("ERROR: verify file {0}: {1}".format((filename,stderr)))
else:
print_callback("Loading hmac for file"+filename)
hmac = load_hmac(open(os.path.join(backup_tmpdir,filename+".hmac"),'r').read())
if len(hmac) > 0 and load_hmac(stdout) == hmac:
print_callback("File verification OK -> Sending file "+filename+" for extraction")
# Send the chunk to the backup target
to_extract.put(os.path.join(backup_tmpdir,filename))
else:
raise QubesException("ERROR: invalid hmac for file {0}: {1}. Is the passphrase correct?".format(filename,load_hmac(stdout)))
    if command.poll() != 0:
        raise QubesException("ERROR: unable to read the qubes backup file {0}. Is it really a backup?".format(backup_dir))
    if vmproc is not None and vmproc.poll() != 0:
        raise QubesException("ERROR: unable to read the qubes backup {0} because of a VM error: {1}".format(backup_dir, vmproc.stderr.read()))
print "Extraction process status:",extract_proc.exitcode
to_extract.put("FINISHED")
print_callback("Waiting for the extraction process to finish...")
extract_proc.join()
print_callback("Extraction process finished with code:"+str(extract_proc.exitcode))
if extract_proc.exitcode != 0:
raise QubesException("ERROR: unable to extract the qubes backup. Check extracting process errors.")
def backup_restore_set_defaults(options):
if 'use-default-netvm' not in options:
options['use-default-netvm'] = False
if 'use-none-netvm' not in options:
options['use-none-netvm'] = False
if 'use-default-template' not in options:
options['use-default-template'] = False
if 'dom0-home' not in options:
options['dom0-home'] = True
if 'replace-template' not in options:
options['replace-template'] = []
return options
def load_hmac(hmac):
hmac = hmac.strip(" \t\r\n").split("=")
if len(hmac) > 1:
hmac = hmac[1].strip()
else:
raise QubesException("ERROR: invalid hmac file content")
return hmac
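# Illustrative example, assuming openssl dgst's "label= digest" output format
# (the exact label is an assumption; only the split-on-"=" behaviour matters here):
#   load_hmac("HMAC-SHA1(stdin)= 4ff3a2...") -> "4ff3a2..."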
import struct
def get_qfile_error(buffer):
error = struct.unpack("I",buffer[0:4])[0]
error_msg = { 0: "COPY_FILE_OK",
1: "COPY_FILE_READ_EOF",
2: "COPY_FILE_READ_ERROR",
3: "COPY_FILE_WRITE_ERROR",
}
if error in error_msg.keys():
return error_msg[error]
else:
return "UNKNOWN_ERROR_"+str(error)
def backup_restore_header(restore_target, passphrase, encrypt=False, appvm=None):
# Simulate dd if=backup_file count=10 | file -
# Simulate dd if=backup_file count=10 | gpg2 -d | tar xzv -O
# analysis = subprocess.Popen()
vmproc = None
import tempfile
feedback_file = tempfile.NamedTemporaryFile()
backup_tmpdir = tempfile.mkdtemp(prefix="/var/tmp/restore_")
os.chdir(backup_tmpdir)
    # Tar with tapelength does not deal well with stdout (it closes stdout between two tapes)
# For this reason, we will use named pipes instead
print "Working in",backup_tmpdir
if appvm != None:
# Prepare the backup target (Qubes service call)
restore_command = "QUBESRPC qubes.Restore none"
# does the vm exist?
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
vm = qvm_collection.get_vm_by_name(appvm)
if vm is None or vm.qid not in qvm_collection:
raise QubesException("VM {0} does not exist".format(appvm))
qvm_collection.unlock_db()
# If APPVM, STDOUT is a PIPE
vmproc = vm.run(command = restore_command, passio_popen = True, passio_stderr = True)
vmproc.stdin.write(restore_target.replace("\r","").replace("\n","")+"\n")
else:
# Create the target directory
if not os.path.exists (restore_target):
raise QubesException("ERROR: the backup directory {0} does not exists".format(restore_target))
fp = open(restore_target,'rb')
headers = fp.read(4096*16)
tar1_command = ['/usr/lib/qubes/qfile-dom0-unpacker', str(os.getuid()), backup_tmpdir]
command = subprocess.Popen(tar1_command,stdin=vmproc.stdout,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
result_header = command.stdout.read()
if vmproc.poll() != None:
error = vmproc.stderr.read()
print error
print vmproc.poll(),command.poll()
raise QubesException("ERROR: Immediate VM error while retrieving backup headers:{0}".format(error))
filename = "qubes.xml.000"
print result_header.encode("hex")
error_msg = get_qfile_error(result_header)
if error_msg != "COPY_FILE_OK":
print vmproc.stdout.read()
raise QubesException("ERROR: unpacking backup headers: {0}".format(error_msg))
if not os.path.exists(os.path.join(backup_tmpdir,filename+".hmac")):
raise QubesException("ERROR: header not extracted correctly: {0}".format(os.path.join(backup_tmpdir,filename+".hmac")))
command.terminate()
command.wait()
if vmproc.poll() != None and vmproc.poll() != 0:
error = vmproc.stderr.read()
print error
print vmproc.poll(),command.poll()
raise QubesException("ERROR: VM error retrieving backup headers")
elif command.poll() != None and command.poll() not in [0,-15]:
error = command.stderr.read()
print error
print vmproc.poll(),command.poll()
raise QubesException("ERROR: retrieving backup headers:{0}".format(error))
if vmproc.poll() == None:
vmproc.terminate()
vmproc.wait()
print "Loading hmac for file",filename
hmac = load_hmac(open(os.path.join(backup_tmpdir,filename+".hmac"),'r').read())
print "Successfully retrieved headers"
print "Verifying file",filename
hmac_proc = subprocess.Popen (["openssl", "dgst", "-hmac", passphrase], stdin=open(os.path.join(backup_tmpdir,filename),'rb'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = hmac_proc.communicate()
if len(stderr) > 0:
raise QubesException("ERROR: verify file {0}: {1}".format((filename,stderr)))
else:
if len(hmac) > 0 and load_hmac(stdout) == hmac:
print "File verification OK -> Extracting archive",filename
if encrypt:
print "Starting decryption process"
encryptor = subprocess.Popen (["openssl", "enc", "-d", "-aes-256-cbc", "-pass", "pass:"+passphrase], stdin=open(os.path.join(backup_tmpdir,filename),'rb'), stdout=subprocess.PIPE)
tarhead_command = subprocess.Popen(['tar', '--tape-length','1000000', '-xv'],stdin=encryptor.stdout)
else:
print "No decryption process required"
encryptor = None
tarhead_command = subprocess.Popen(['tar', '--tape-length','1000000', '-xvf', os.path.join(backup_tmpdir,filename)])
tarhead_command.wait()
if encryptor:
if encryptor.poll() != 0:
raise QubesException("ERROR: unable to decrypt file {0}".format(filename))
if tarhead_command.poll() != 0:
raise QubesException("ERROR: unable to extract the qubes.xml file. Is archive encrypted?")
return (backup_tmpdir,"qubes.xml")
else:
raise QubesException("ERROR: unable to verify the qubes.xml file. Is the passphrase correct?")
return None
def backup_restore_prepare(backup_dir, qubes_xml, passphrase, options = {}, host_collection = None, encrypt=False, appvm=None):
# Defaults
backup_restore_set_defaults(options)
#### Private functions begin
def is_vm_included_in_backup (backup_dir, vm):
if vm.qid == 0:
# Dom0 is not included, obviously
return False
if vm.backup_content:
return True
else:
return False
def find_template_name(template, replaces):
rx_replace = re.compile("(.*):(.*)")
for r in replaces:
m = rx_replace.match(r)
if m.group(1) == template:
return m.group(2)
return template
#### Private functions end
print "Loading file",qubes_xml
backup_collection = QubesVmCollection(store_filename = qubes_xml)
backup_collection.lock_db_for_reading()
backup_collection.load()
if host_collection is None:
host_collection = QubesVmCollection()
host_collection.lock_db_for_reading()
host_collection.load()
host_collection.unlock_db()
backup_vms_list = [vm for vm in backup_collection.values()]
host_vms_list = [vm for vm in host_collection.values()]
vms_to_restore = {}
there_are_conflicting_vms = False
there_are_missing_templates = False
there_are_missing_netvms = False
dom0_username_mismatch = False
restore_home = False
# ... and the actual data
for vm in backup_vms_list:
if is_vm_included_in_backup (backup_dir, vm):
print vm.name,"is included in backup"
vms_to_restore[vm.name] = {}
vms_to_restore[vm.name]['vm'] = vm;
if 'exclude' in options.keys():
vms_to_restore[vm.name]['excluded'] = vm.name in options['exclude']
vms_to_restore[vm.name]['good-to-go'] = False
if host_collection.get_vm_by_name (vm.name) is not None:
vms_to_restore[vm.name]['already-exists'] = True
vms_to_restore[vm.name]['good-to-go'] = False
if vm.template is None:
vms_to_restore[vm.name]['template'] = None
else:
templatevm_name = find_template_name(vm.template.name, options['replace-template'])
vms_to_restore[vm.name]['template'] = templatevm_name
template_vm_on_host = host_collection.get_vm_by_name (templatevm_name)
# No template on the host?
if not ((template_vm_on_host is not None) and template_vm_on_host.is_template()):
# Maybe the (custom) template is in the backup?
template_vm_on_backup = backup_collection.get_vm_by_name (templatevm_name)
if template_vm_on_backup is None or not \
(is_vm_included_in_backup(backup_dir, template_vm_on_backup) and \
template_vm_on_backup.is_template()):
if options['use-default-template']:
vms_to_restore[vm.name]['orig-template'] = templatevm_name
vms_to_restore[vm.name]['template'] = host_collection.get_default_template().name
else:
vms_to_restore[vm.name]['missing-template'] = True
vms_to_restore[vm.name]['good-to-go'] = False
if vm.netvm is None:
vms_to_restore[vm.name]['netvm'] = None
else:
netvm_name = vm.netvm.name
vms_to_restore[vm.name]['netvm'] = netvm_name
# Set to None to not confuse QubesVm object from backup
# collection with host collection (further in clone_attrs). Set
# directly _netvm to suppress setter action, especially
# modifying firewall
vm._netvm = None
netvm_on_host = host_collection.get_vm_by_name (netvm_name)
# No netvm on the host?
if not ((netvm_on_host is not None) and netvm_on_host.is_netvm()):
# Maybe the (custom) netvm is in the backup?
netvm_on_backup = backup_collection.get_vm_by_name (netvm_name)
if not ((netvm_on_backup is not None) and netvm_on_backup.is_netvm() and is_vm_included_in_backup(backup_dir, netvm_on_backup)):
if options['use-default-netvm']:
vms_to_restore[vm.name]['netvm'] = host_collection.get_default_netvm().name
vm.uses_default_netvm = True
elif options['use-none-netvm']:
vms_to_restore[vm.name]['netvm'] = None
else:
vms_to_restore[vm.name]['missing-netvm'] = True
vms_to_restore[vm.name]['good-to-go'] = False
if 'good-to-go' not in vms_to_restore[vm.name].keys():
vms_to_restore[vm.name]['good-to-go'] = True
# ...and dom0 home
# FIXME, replace this part of code to handle the new backup format using tar
if options['dom0-home'] and os.path.exists(backup_dir + '/dom0-home'):
vms_to_restore['dom0'] = {}
local_user = grp.getgrnam('qubes').gr_mem[0]
dom0_homes = os.listdir(backup_dir + '/dom0-home')
if len(dom0_homes) > 1:
raise QubesException("More than one dom0 homedir in backup")
vms_to_restore['dom0']['username'] = dom0_homes[0]
if dom0_homes[0] != local_user:
vms_to_restore['dom0']['username-mismatch'] = True
if not options['ignore-dom0-username-mismatch']:
vms_to_restore['dom0']['good-to-go'] = False
if 'good-to-go' not in vms_to_restore['dom0']:
vms_to_restore['dom0']['good-to-go'] = True
return vms_to_restore
def backup_restore_print_summary(restore_info, print_callback = print_stdout):
fields = {
"qid": {"func": "vm.qid"},
"name": {"func": "('[' if vm.is_template() else '')\
+ ('{' if vm.is_netvm() else '')\
+ vm.name \
+ (']' if vm.is_template() else '')\
+ ('}' if vm.is_netvm() else '')"},
"type": {"func": "'Tpl' if vm.is_template() else \
'HVM' if vm.type == 'HVM' else \
vm.type.replace('VM','')"},
"updbl" : {"func": "'Yes' if vm.updateable else ''"},
"template": {"func": "'n/a' if vm.is_template() or vm.template is None else\
vm_info['template']"},
"netvm": {"func": "'n/a' if vm.is_netvm() and not vm.is_proxyvm() else\
('*' if vm.uses_default_netvm else '') +\
vm_info['netvm'] if vm_info['netvm'] is not None else '-'"},
"label" : {"func" : "vm.label.name"},
}
fields_to_display = ["name", "type", "template", "updbl", "netvm", "label" ]
# First calculate the maximum width of each field we want to display
total_width = 0;
for f in fields_to_display:
fields[f]["max_width"] = len(f)
for vm_info in restore_info.values():
if 'vm' in vm_info.keys():
vm = vm_info['vm']
l = len(str(eval(fields[f]["func"])))
if l > fields[f]["max_width"]:
fields[f]["max_width"] = l
total_width += fields[f]["max_width"]
print_callback("")
print_callback("The following VMs are included in the backup:")
print_callback("")
# Display the header
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
s += fmt.format('-')
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
s += fmt.format(f)
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
s += fmt.format('-')
print_callback(s)
for vm_info in restore_info.values():
# Skip non-VM here
if not 'vm' in vm_info:
continue
vm = vm_info['vm']
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
s += fmt.format(eval(fields[f]["func"]))
if 'excluded' in vm_info and vm_info['excluded']:
s += " <-- Excluded from restore"
elif 'already-exists' in vm_info:
s += " <-- A VM with the same name already exists on the host!"
elif 'missing-template' in vm_info:
s += " <-- No matching template on the host or in the backup found!"
elif 'missing-netvm' in vm_info:
s += " <-- No matching netvm on the host or in the backup found!"
elif 'orig-template' in vm_info:
s += " <-- Original template was '%s'" % (vm_info['orig-template'])
print_callback(s)
if 'dom0' in restore_info.keys():
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
if f == "name":
s += fmt.format("Dom0")
elif f == "type":
s += fmt.format("Home")
else:
s += fmt.format("")
if 'username-mismatch' in restore_info['dom0']:
s += " <-- username in backup and dom0 mismatch"
print_callback(s)
def backup_restore_do(backup_dir, restore_tmpdir, passphrase, restore_info, host_collection = None, print_callback = print_stdout, error_callback = print_stderr, progress_callback = None, encrypted=False, appvm=None):
lock_obtained = False
if host_collection is None:
host_collection = QubesVmCollection()
host_collection.lock_db_for_writing()
host_collection.load()
lock_obtained = True
# Perform VM restoration in backup order
vms_dirs = []
vms_size = 0
vms = {}
for vm_info in restore_info.values():
if not vm_info['good-to-go']:
continue
if 'vm' not in vm_info:
continue
vm = vm_info['vm']
vms_size += vm.backup_size
vms_dirs.append(vm.backup_path+"*")
vms[vm.name] = vm
restore_vm_dirs (backup_dir, restore_tmpdir, passphrase, vms_dirs, vms, vms_size, print_callback, error_callback, progress_callback, encrypted, appvm)
# Add VM in right order
for (vm_class_name, vm_class) in sorted(QubesVmClasses.items(),
key=lambda _x: _x[1].load_order):
for vm_info in restore_info.values():
if not vm_info['good-to-go']:
continue
if 'vm' not in vm_info:
continue
vm = vm_info['vm']
if not vm.__class__ == vm_class:
continue
print_callback("-> Restoring {type} {0}...".format(vm.name, type=vm_class_name))
retcode = subprocess.call (["mkdir", "-p", vm.dir_path])
if retcode != 0:
error_callback("*** Cannot create directory: {0}?!".format(dest_dir))
error_callback("Skipping...")
continue
template = None
if vm.template is not None:
template_name = vm_info['template']
template = host_collection.get_vm_by_name(template_name)
new_vm = None
try:
new_vm = host_collection.add_new_vm(vm_class_name, name=vm.name,
conf_file=vm.conf_file,
dir_path=vm.dir_path,
template=template,
installed_by_rpm=False)
new_vm.verify_files()
except Exception as err:
error_callback("ERROR: {0}".format(err))
error_callback("*** Skipping VM: {0}".format(vm.name))
if new_vm:
host_collection.pop(new_vm.qid)
continue
try:
new_vm.clone_attrs(vm)
except Exception as err:
error_callback("ERROR: {0}".format(err))
error_callback("*** Some VM property will not be restored")
try:
new_vm.create_appmenus(verbose=True)
except Exception as err:
error_callback("ERROR during appmenu restore: {0}".format(err))
error_callback("*** VM '{0}' will not have appmenus".format(vm.name))
# Set network dependencies - only non-default netvm setting
for vm_info in restore_info.values():
if not vm_info['good-to-go']:
continue
if 'vm' not in vm_info:
continue
vm = vm_info['vm']
host_vm = host_collection.get_vm_by_name(vm.name)
if host_vm is None:
# Failed/skipped VM
continue
if not vm.uses_default_netvm:
host_vm.netvm = host_collection.get_vm_by_name (vm_info['netvm']) if vm_info['netvm'] is not None else None
host_collection.save()
if lock_obtained:
host_collection.unlock_db()
# ... and dom0 home as last step
if 'dom0' in restore_info.keys() and restore_info['dom0']['good-to-go']:
backup_info = restore_info['dom0']
local_user = grp.getgrnam('qubes').gr_mem[0]
home_dir = pwd.getpwnam(local_user).pw_dir
backup_dom0_home_dir = backup_dir + '/dom0-home/' + backup_info['username']
restore_home_backupdir = "home-pre-restore-{0}".format (time.strftime("%Y-%m-%d-%H%M%S"))
print_callback("-> Restoring home of user '{0}'...".format(local_user))
print_callback("--> Existing files/dirs backed up in '{0}' dir".format(restore_home_backupdir))
os.mkdir(home_dir + '/' + restore_home_backupdir)
for f in os.listdir(backup_dom0_home_dir):
home_file = home_dir + '/' + f
if os.path.exists(home_file):
os.rename(home_file, home_dir + '/' + restore_home_backupdir + '/' + f)
retcode = subprocess.call (["cp", "-nrp", backup_dom0_home_dir + '/' + f, home_file])
if retcode != 0:
error_callback("*** Error while copying file {0} to {1}".format(backup_dom0_home_dir + '/' + f, home_file))
retcode = subprocess.call(['sudo', 'chown', '-R', local_user, home_dir])
if retcode != 0:
error_callback("*** Error while setting home directory owner")
# vim:sw=4:et:
| gpl-2.0 | -220,990,330,861,483,870 | 38.955533 | 217 | 0.577195 | false | 3.644711 | false | false | false |
hehongliang/tensorflow | tensorflow/python/kernel_tests/unstack_op_test.py | 1 | 6055 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Unstack Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
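  """Splits `array` along `axis` and squeezes that axis out of every slice (NumPy reference for unstack)."""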
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class UnstackOpTest(test.TestCase):
def testSimple(self):
np.random.seed(7)
with test_util.use_gpu():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [
np.bool, np.float16, np.float32, np.float64, np.int32, np.int64
]:
data = np.random.randn(*shape).astype(dtype)
# Convert data to a single tensorflow tensor
x = constant_op.constant(data)
# Unstack into a list of tensors
cs = array_ops.unstack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [self.evaluate(c) for c in cs]
self.assertAllEqual(cs, data)
def testSimpleGpu(self):
if not test_util.is_gpu_available():
self.skipTest('No GPU available')
np.random.seed(7)
with test_util.force_gpu():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Convert data to a single tensorflow tensor
x = constant_op.constant(data)
# Unstack into a list of tensors
cs = array_ops.unstack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [self.evaluate(c) for c in cs]
self.assertAllEqual(cs, data)
def testGradientsAxis0(self):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
for i in xrange(shape[0]):
with self.cached_session(use_gpu=True):
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[0])
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
shapes[i])
self.assertLess(err, 1e-6)
def testGradientsAxis1(self):
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
out_shape = list(shape)
del out_shape[1]
for i in xrange(shape[1]):
with self.cached_session(use_gpu=True):
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[1], axis=1)
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
out_shape)
self.assertLess(err, 1e-6)
def testInferNum(self):
with self.cached_session():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
x = array_ops.placeholder(np.float32, shape=shape)
cs = array_ops.unstack(x)
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
def testCannotInferNumFromUnknownShape(self):
x = array_ops.placeholder(np.float32)
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape <unknown>'):
array_ops.unstack(x)
def testUnknownShapeOkWithNum(self):
x = array_ops.placeholder(np.float32)
array_ops.unstack(x, num=2)
def testCannotInferNumFromNoneShape(self):
x = array_ops.placeholder(np.float32, shape=(None,))
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape \((\?|None),\)'):
array_ops.unstack(x)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
a = np.random.random(np.random.permutation(i) + 1)
# For all the possible axis to split it, including negative indices.
for j in range(-i, i):
expected = np_split_squeeze(a, j)
actual_unstack = self.evaluate(array_ops.unstack(a, axis=j))
self.assertAllEqual(expected, actual_unstack)
def testAxis0Default(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
unstacked = self.evaluate(array_ops.unstack(a))
self.assertEqual(len(unstacked), 2)
self.assertAllEqual(unstacked[0], [1, 2, 3])
self.assertAllEqual(unstacked[1], [4, 5, 6])
def testAxisOutOfRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
array_ops.unstack(a, axis=2)
def testAxisOutOfNegativeRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
array_ops.unstack(a, axis=-3)
def testZeroLengthDim(self):
x = array_ops.zeros(shape=(0, 1, 2))
y = self.evaluate(array_ops.unstack(x, axis=1)[0])
self.assertEqual(y.shape, (0, 2))
if __name__ == '__main__':
test.main()
| apache-2.0 | -1,743,473,666,163,420,000 | 35.920732 | 80 | 0.608423 | false | 3.46 | true | false | false |
jenfly/atmos-read | scripts/merra-replace-data.py | 1 | 5275 | """
Replace corrupted data files with daily data re-downloaded with wget
"""
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import shutil
import xarray as xray
import numpy as np
import collections
import time
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = '/net/eady/data1/jwalker/datastore/merra2/wget/'
savedir = '/net/eady/data1/jwalker/datastore/merra2/merged/'
probdata = pd.read_csv('scripts/merra_urls/merge_data.csv', index_col=0)
# For each corrupted data file:
# - load the corrupted data file
# - load the new downloaded file for the problem day
# - calculate d/dp and other stuff
# - merge the data for the affected day
# - save into data file for the year
def latlon_filestr(lat1, lat2, lon1, lon2):
"""Return nicely formatted string for lat-lon range."""
latstr = atm.latlon_str(lat1, lat2, 'lat')
lonstr = atm.latlon_str(lon1, lon2, 'lon')
return lonstr + '_' + latstr
def latlon_data(var, lat1, lat2, lon1, lon2, plev=None):
"""Extract lat-lon subset of data."""
name = var.name
varnm = name
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
if plev is not None:
name = name + '%d' % plev
subset_dict['plev'] = (plev, plev)
var = atm.subset(var, subset_dict, copy=False, squeeze=True)
var.name = name
var.attrs['filestr'] = '%s_%s' % (name, latlonstr)
var.attrs['varnm'] = varnm
return var
def pgradient(var, lat1, lat2, lon1, lon2, plev):
"""Return d/dp of a lat-lon variable."""
pwidth = 100
p1, p2 = plev - pwidth, plev + pwidth
var = atm.subset(var, {'lat' : (lat1, lat2), 'lon' : (lon1, lon2),
'plev' : (p1, p2)}, copy=False, squeeze=True)
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
attrs = var.attrs
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
pres = var[pname]
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dvar_dp = atm.gradient(var, pres, axis=pdim)
dvar_dp = atm.subset(dvar_dp, {pname : (plev, plev)}, copy=False,
squeeze=True)
varnm = 'D%sDP' % var.name
name = '%s%d' % (varnm, plev)
dvar_dp.name = name
attrs['long_name'] = 'd/dp of ' + var.attrs['long_name']
attrs['standard_name'] = 'd/dp of ' + var.attrs['standard_name']
attrs['units'] = ('(%s)/Pa' % attrs['units'])
attrs[pname] = plev
attrs['filestr'] = '%s_%s' % (name, latlonstr)
attrs['varnm'] = varnm
dvar_dp.attrs = attrs
return dvar_dp
def var_calcs(filenm, varnm, plev, latlon=(-90, 90, 40, 120)):
"""Process a single variable from a single day."""
lat1, lat2, lon1, lon2 = latlon
if varnm == 'DUDP':
nm, dp = 'U', True
elif varnm == 'DOMEGADP':
nm, dp = 'OMEGA', True
else:
nm, dp = varnm, False
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if dp:
print('Computing d/dp')
var = pgradient(var, lat1, lat2, lon1, lon2, plev)
else:
var = latlon_data(var, lat1, lat2, lon1, lon2, plev)
return var
def process_row(row, datadir, savedir):
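    """Recompute the given variable for the corrupted day from the re-downloaded
    file and merge it back into the yearly data file (saved to both output locations)."""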
filenm1 = row['filename']
year = row['year']
varnm = row['varnm']
plev = row['plev']
jday = row['jday']
filenm2 = datadir + row['datfile']
savefile1 = filenm1
savefile2 = savedir + os.path.split(filenm1)[1]
print('%d, %s, plev=%d' % (year, varnm, plev))
print('Reading original data from ' + filenm1)
with xray.open_dataset(filenm1) as ds:
var1 = ds[varnm].load()
print('Processing new data from ' + filenm2)
var2 = var_calcs(filenm2, varnm, plev)
print('Merging data for jday %d' % jday)
var = var1.copy()
ind = jday - 1
days = atm.get_coord(var1, 'day')
if not days[ind] == jday:
raise ValueError('Days not indexed from 1, need to edit code to handle')
var[ind] = var2
print('Saving to ' + savefile1)
var.to_netcdf(savefile1)
print('Saving to ' + savefile2)
var.to_netcdf(savefile2)
data = {'orig' : var1, 'new' : var2, 'merged' : var}
return data
# Make a copy of each of the original files -- only run this code once!
# for filenm in probdata['filename']:
# shutil.copyfile(filenm, filenm.replace('.nc', '_orig.nc'))
for i, row in probdata.iterrows():
data = process_row(row, datadir, savedir)
# Plot data to check
def plot_data(probdata, savedir, i):
row = probdata.iloc[i]
filenm = row['filename']
filenm = savedir + os.path.split(filenm)[1]
jday = row['jday']
varnm = row['varnm']
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
plt.figure(figsize=(16, 8))
plt.suptitle(os.path.split(filenm)[1])
plt.subplot(1, 3, 1)
atm.pcolor_latlon(var.sel(day=(jday-1)))
plt.title(jday - 1)
plt.subplot(1, 3, 2)
atm.pcolor_latlon(var.sel(day=jday))
plt.title(jday)
plt.subplot(1, 3, 3)
atm.pcolor_latlon(var.sel(day=(jday+1)))
plt.title(jday + 1) | mit | -9,150,407,698,078,713,000 | 31.975 | 80 | 0.609289 | false | 2.836022 | false | false | false |
ivanyu/rosalind | algorithmic_heights/sc/sc.py | 1 | 1421 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def main(argv):
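    """Read k graphs (from the file given in argv[1], or use a small built-in example)
    and print 1 or -1 for each graph depending on its semi-connectedness."""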
from sc_logic import check_semi_connectedness
graphs = []
if len(argv) < 2:
print('k = 2')
k = 2
print('Graph 1:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('2 1')
g[1][0] = 1
graphs.append(g)
print('Graph 2:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('1 2')
g[0][1] = 1
graphs.append(g)
else:
with open(argv[1]) as f:
k = int(f.readline().strip())
for _ in range(k):
f.readline()
line = f.readline()
n, m = [int(x.strip()) for x in line.strip().split()]
g = [[0 for _ in range(n)] for _ in range(n)]
for edge in range(m):
line = f.readline()
i, j = [int(x.strip()) for x in line.strip().split()]
g[i - 1][j - 1] = 1
graphs.append(g)
for g in graphs:
r = check_semi_connectedness(g)
print('1' if r else -1, end=' ')
if __name__ == "__main__":
import sys
main(sys.argv)
| mit | -6,796,745,880,054,113,000 | 23.084746 | 73 | 0.398311 | false | 3.367299 | false | false | false |
openstack/tacker | tacker/objects/grant.py | 1 | 11110 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker import objects
from tacker.objects import base
from tacker.objects import fields
@base.TackerObjectRegistry.register
class Grant(base.TackerObject):
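    """Grant object describing the resources, zones and VIM assets approved for a VNF lifecycle operation."""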
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'vnf_instance_id': fields.StringField(nullable=False),
'vnf_lcm_op_occ_id': fields.StringField(nullable=False),
'vim_connections': fields.ListOfObjectsField(
'VimConnectionInfo', nullable=True, default=[]),
'zones': fields.ListOfObjectsField(
'ZoneInfo', nullable=True, default=[]),
'add_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'remove_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'update_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'vim_assets': fields.ObjectField(
'VimAssets', nullable=True),
'ext_virtual_links': fields.ListOfObjectsField(
'ExtVirtualLinkData', nullable=True, default=[]),
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_grant = super(
Grant, cls).obj_from_primitive(primitive, context)
else:
if 'vim_connections' in primitive.keys():
obj_data = [objects.VimConnectionInfo._from_dict(
vim_conn) for vim_conn in primitive.get(
'vim_connections', [])]
primitive.update({'vim_connections': obj_data})
if 'zones' in primitive.keys():
obj_data = [ZoneInfo._from_dict(
zone) for zone in primitive.get(
'zones', [])]
primitive.update({'zones': obj_data})
if 'add_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
add_rsc) for add_rsc in primitive.get(
'add_resources', [])]
primitive.update({'add_resources': obj_data})
if 'remove_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
remove_rsc) for remove_rsc in primitive.get(
'remove_resources', [])]
primitive.update({'remove_resources': obj_data})
if 'update_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
update_rsc) for update_rsc in primitive.get(
'update_resources', [])]
primitive.update({'update_resources': obj_data})
if 'vim_assets' in primitive.keys():
obj_data = VimAssets.obj_from_primitive(
primitive.get('vim_assets'), context)
primitive.update({'vim_assets': obj_data})
if 'ext_virtual_links' in primitive.keys():
obj_data = [objects.ExtVirtualLinkData.obj_from_primitive(
ext_vir_link, context) for ext_vir_link in primitive.get(
'ext_virtual_links', [])]
primitive.update({'ext_virtual_links': obj_data})
obj_grant = Grant._from_dict(primitive)
return obj_grant
@classmethod
def _from_dict(cls, data_dict):
id = data_dict.get('id')
vnf_instance_id = data_dict.get('vnf_instance_id')
vnf_lcm_op_occ_id = data_dict.get('vnf_lcm_op_occ_id')
vim_connections = data_dict.get('vim_connections', [])
zones = data_dict.get('zones', [])
add_resources = data_dict.get('add_resources', [])
remove_resources = data_dict.get('remove_resources', [])
update_resources = data_dict.get('update_resources', [])
vim_assets = data_dict.get('vim_assets')
ext_virtual_links = data_dict.get('ext_virtual_links', [])
obj = cls(
id=id,
vnf_instance_id=vnf_instance_id,
vnf_lcm_op_occ_id=vnf_lcm_op_occ_id,
vim_connections=vim_connections,
zones=zones,
add_resources=add_resources,
remove_resources=remove_resources,
update_resources=update_resources,
vim_assets=vim_assets,
ext_virtual_links=ext_virtual_links)
return obj
@base.TackerObjectRegistry.register
class ZoneInfo(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'zone_id': fields.StringField(nullable=False),
'vim_connection_id': fields.StringField(nullable=True)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_zone_info = super(
ZoneInfo, cls).obj_from_primitive(primitive, context)
else:
obj_zone_info = ZoneInfo._from_dict(primitive)
return obj_zone_info
@classmethod
def _from_dict(cls, data_dict):
id = data_dict.get('id')
zone_id = data_dict.get('zone_id')
vim_connection_id = data_dict.get('vim_connection_id')
obj = cls(
id=id,
zone_id=zone_id,
vim_connection_id=vim_connection_id)
return obj
@base.TackerObjectRegistry.register
class GrantInfo(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'resource_definition_id': fields.StringField(nullable=False),
'vim_connection_id': fields.StringField(nullable=True),
'zone_id': fields.StringField(nullable=True)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_grant_info = super(
GrantInfo, cls).obj_from_primitive(primitive, context)
else:
obj_grant_info = GrantInfo._from_dict(primitive)
return obj_grant_info
@classmethod
def _from_dict(cls, data_dict):
resource_definition_id = data_dict.get('resource_definition_id')
vim_connection_id = data_dict.get('vim_connection_id')
zone_id = data_dict.get('zone_id')
obj = cls(
resource_definition_id=resource_definition_id,
vim_connection_id=vim_connection_id,
zone_id=zone_id)
return obj
@base.TackerObjectRegistry.register
class VimAssets(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'compute_resource_flavours': fields.ListOfObjectsField(
'VimComputeResourceFlavour', nullable=True, default=[]),
'software_images': fields.ListOfObjectsField(
'VimSoftwareImage', nullable=True, default=[])
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_vim_assets = super(
VimAssets, cls).obj_from_primitive(primitive, context)
else:
if 'compute_resource_flavours' in primitive.keys():
obj_data = [VimComputeResourceFlavour._from_dict(
flavour) for flavour in primitive.get(
'compute_resource_flavours', [])]
primitive.update({'compute_resource_flavours': obj_data})
if 'software_images' in primitive.keys():
obj_data = [VimSoftwareImage._from_dict(
img) for img in primitive.get(
'software_images', [])]
primitive.update({'software_images': obj_data})
obj_vim_assets = VimAssets._from_dict(primitive)
return obj_vim_assets
@classmethod
def _from_dict(cls, data_dict):
compute_resource_flavours = data_dict.get(
'compute_resource_flavours', [])
software_images = data_dict.get('software_images', [])
obj = cls(
compute_resource_flavours=compute_resource_flavours,
software_images=software_images)
return obj
@base.TackerObjectRegistry.register
class VimComputeResourceFlavour(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vim_connection_id': fields.StringField(nullable=True),
'vnfd_virtual_compute_desc_id': fields.StringField(nullable=False),
'vim_flavour_id': fields.StringField(nullable=False)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_flavour = super(
VimComputeResourceFlavour,
cls).obj_from_primitive(
primitive,
context)
else:
obj_flavour = VimComputeResourceFlavour._from_dict(primitive)
return obj_flavour
@classmethod
def _from_dict(cls, data_dict):
vim_connection_id = data_dict.get('vim_connection_id')
vnfd_virtual_compute_desc_id = data_dict.get(
'vnfd_virtual_compute_desc_id')
vim_flavour_id = data_dict.get('vim_flavour_id')
obj = cls(
vim_connection_id=vim_connection_id,
vnfd_virtual_compute_desc_id=vnfd_virtual_compute_desc_id,
vim_flavour_id=vim_flavour_id)
return obj
@base.TackerObjectRegistry.register
class VimSoftwareImage(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vim_connection_id': fields.StringField(nullable=True),
'vnfd_software_image_id': fields.StringField(nullable=False),
'vim_software_image_id': fields.StringField(nullable=False)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_img = super(
VimSoftwareImage, cls).obj_from_primitive(primitive, context)
else:
obj_img = VimSoftwareImage._from_dict(primitive)
return obj_img
@classmethod
def _from_dict(cls, data_dict):
vim_connection_id = data_dict.get('vim_connection_id')
vnfd_software_image_id = data_dict.get('vnfd_software_image_id')
vim_software_image_id = data_dict.get('vim_software_image_id')
obj = cls(
vim_connection_id=vim_connection_id,
vnfd_software_image_id=vnfd_software_image_id,
vim_software_image_id=vim_software_image_id)
return obj
| apache-2.0 | -1,107,050,491,365,563,900 | 35.546053 | 78 | 0.59883 | false | 3.827075 | false | false | false |
ezietsman/msc-thesis | images/makeunflat2.py | 1 | 1059 | from pylab import *
import astronomy as ast
# to format the labels better
from matplotlib.ticker import FormatStrFormatter
fmt = FormatStrFormatter('%1.2g') # or whatever
X1 = loadtxt('ec2117ans_1_c.dat')  # plain-text columns; loadtxt replaces the long-removed pylab load()
x1 = X1[:,0]
y1 = 10**(X1[:,2]/(-2.5))
y1 /= average(y1)
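# Ephemeris used below to phase-fold the light curve: T0 is the reference epoch (JD) and P the orbital period in days.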
T0 = 2453964.3307097
P = 0.1545255
figure(figsize=(6,4))
subplots_adjust(hspace=0.6,left=0.16)
ax = subplot(211)
#plot(x1,y1,'.')
scatter((x1-T0)/P,y1,s=0.8,faceted=False)
xlabel('Orbital Phase')
ylabel('Intensity')
title('Original Lightcurve')
#ylim(min(y1)-0.0000005,max(y1)+0.0000005)
ax.yaxis.set_major_formatter(fmt)
ax = subplot(212)
x2,y2 = ast.signal.dft(x1,y1,0,7000,1)
plot(x2,y2,'k-')
xlabel('Frequency (cycles/day)')
ylabel('Amplitude')
#vlines(3560,0.000000025,0.00000003,color='k',linestyle='solid')
#vlines(950,0.000000025,0.00000003,color='k',linestyle='solid')
#text(3350,0.000000035,'DNO',fontsize=10)
#text(700,0.000000035,'lpDNO',fontsize=10)
xlim(0,7000)
ylim(0,0.004)
title('Periodogram')
#ax.yaxis.set_major_formatter(fmt)
savefig('unflattened.png')
show()
| mit | 3,372,714,419,916,034,000 | 18.981132 | 64 | 0.70255 | false | 2.317287 | false | true | false |
WilJoey/tn_ckan | ckan/new_tests/lib/navl/test_validators.py | 1 | 9126 | # -*- coding: utf-8 -*-
'''Unit tests for ckan/lib/navl/validators.py.
'''
import copy
import nose.tools
import ckan.new_tests.factories as factories
def returns_None(function):
'''A decorator that asserts that the decorated function returns None.
:param function: the function to decorate
:type function: function
Usage:
@returns_None
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(*args, **kwargs):
original_args = copy.deepcopy(args)
original_kwargs = copy.deepcopy(kwargs)
result = function(*args, **kwargs)
assert result is None, (
'Should return None when called with args: {args} and '
'kwargs: {kwargs}'.format(args=original_args,
kwargs=original_kwargs))
return result
return call_and_assert
def raises_StopOnError(function):
'''A decorator that asserts that the decorated function raises
dictization_functions.StopOnError.
:param function: the function to decorate
:type function: function
Usage:
@raises_StopOnError
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(*args, **kwargs):
import ckan.lib.navl.dictization_functions as df
nose.tools.assert_raises(df.StopOnError, function, *args, **kwargs)
return call_and_assert
def does_not_modify_data_dict(validator):
'''A decorator that asserts that the decorated validator doesn't modify
its `data` dict param.
:param validator: the validator function to decorate
:type validator: function
Usage:
@does_not_modify_data_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
assert data == original_data, (
'Should not modify data dict when called with '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
def removes_key_from_data_dict(validator):
'''A decorator that asserts that the decorated validator removes its key
from the data dict.
:param validator: the validator function to decorate
:type validator: function
Usage:
@removes_key_from_data_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
assert key not in data, (
'Should remove key from data dict when called with: '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context} '.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
def does_not_modify_other_keys_in_data_dict(validator):
'''A decorator that asserts that the decorated validator doesn't add,
modify the value of, or remove any other keys from its ``data`` dict param.
The function *may* modify its own data dict key.
:param validator: the validator function to decorate
:type validator: function
Usage:
@does_not_modify_other_keys_in_data_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
# The validator function is allowed to modify its own key, so remove
# that key from both dicts for the purposes of the assertions below.
if key in data:
del data[key]
if key in original_data:
del original_data[key]
assert data.keys() == original_data.keys(), (
'Should not add or remove keys from data dict when called with '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
for key_ in data:
assert data[key_] == original_data[key_], (
'Should not modify other keys in data dict when called with '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
def does_not_modify_errors_dict(validator):
'''A decorator that asserts that the decorated validator doesn't modify its
`errors` dict param.
:param validator: the validator function to decorate
:type validator: function
Usage:
@does_not_modify_errors_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
assert errors == original_errors, (
'Should not modify errors dict when called with key: {key}, '
'data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
class TestValidators(object):
def test_ignore_missing_with_value_missing(self):
'''ignore_missing() should raise StopOnError if:
- data[key] is None, or
- data[key] is dictization_functions.missing, or
- key is not in data
'''
import ckan.lib.navl.dictization_functions as df
import ckan.lib.navl.validators as validators
for value in (None, df.missing, 'skip'):
# This is the key for the value that is going to be validated.
key = ('key to be validated',)
# The data to pass to the validator function for validation.
data = factories.validator_data_dict()
if value != 'skip':
data[key] = value
# The errors dict to pass to the validator function.
errors = factories.validator_errors_dict()
errors[key] = []
@does_not_modify_other_keys_in_data_dict
@does_not_modify_errors_dict
@removes_key_from_data_dict
@raises_StopOnError
def call_validator(*args, **kwargs):
return validators.ignore_missing(*args, **kwargs)
call_validator(key=key, data=data, errors=errors, context={})
def test_ignore_missing_with_a_value(self):
'''If data[key] is neither None or missing, ignore_missing() should do
nothing.
'''
import ckan.lib.navl.validators as validators
key = ('key to be validated',)
data = factories.validator_data_dict()
data[key] = 'value to be validated'
errors = factories.validator_errors_dict()
errors[key] = []
@returns_None
@does_not_modify_data_dict
@does_not_modify_errors_dict
def call_validator(*args, **kwargs):
return validators.ignore_missing(*args, **kwargs)
call_validator(key=key, data=data, errors=errors, context={})
| mit | 8,180,306,314,881,718,000 | 33.308271 | 79 | 0.595003 | false | 4.321023 | true | false | false |
Parallel-in-Time/pySDC | pySDC/playgrounds/Allen_Cahn/AllenCahn_contracting_circle_standard_integrators.py | 1 | 5930 | import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
from pySDC.implementations.problem_classes.AllenCahn_2D_FD import allencahn_fullyimplicit, allencahn_semiimplicit
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def setup_problem():
problem_params = dict()
problem_params['nu'] = 2
problem_params['nvars'] = (128, 128)
problem_params['eps'] = 0.04
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1E-07
problem_params['lin_tol'] = 1E-08
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
return problem_params
def run_implicit_Euler(t0, dt, Tend):
"""
    Run a plain fully-implicit (backward) Euler integrator as a reference
    Args:
        t0 (float): initial time
        dt (float): time-step size
        Tend (float): end time for dumping
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
u_new = problem.solve_system(rhs=u, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_imex_Euler(t0, dt, Tend):
"""
    Run a first-order IMEX (semi-implicit) Euler integrator as a reference
    Args:
        t0 (float): initial time
        dt (float): time-step size
        Tend (float): end time for dumping
"""
problem = allencahn_semiimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=imex_mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
f = problem.eval_f(u, t)
rhs = u + dt * f.expl
u_new = problem.solve_system(rhs=rhs, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_CrankNicholson(t0, dt, Tend):
"""
    Run a Crank-Nicolson integrator as a reference
    Args:
        t0 (float): initial time
        dt (float): time-step size
        Tend (float): end time for dumping
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0)/dt)
startt = time.time()
t = t0
for n in range(nsteps):
rhs = u + dt / 2 * problem.eval_f(u, t)
u_new = problem.solve_system(rhs=rhs, factor=dt / 2, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def compute_radius(u, dx, t, init_radius):
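    """Estimate the circle radius from the area where u >= 0 (cell count times dx^2,
    radius = sqrt(area/pi)) and return it together with the exact radius sqrt(r0^2 - 2t)."""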
c = np.count_nonzero(u >= 0.0)
radius = np.sqrt(c / np.pi) * dx
exact_radius = np.sqrt(max(init_radius ** 2 - 2.0 * t, 0))
return radius, exact_radius
def plot_radius(xcoords, exact_radius, radii):
fig, ax = plt.subplots()
plt.plot(xcoords, exact_radius, color='k', linestyle='--', linewidth=1, label='exact')
for type, radius in radii.items():
plt.plot(xcoords, radius, linestyle='-', linewidth=2, label=type)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.set_ylabel('radius')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
fname = 'data/AC_contracting_circle_standard_integrators'
plt.savefig('{}.pdf'.format(fname), bbox_inches='tight')
# plt.show()
def main_radius(cwd=''):
"""
Main driver
Args:
cwd (str): current working directory (need this for testing)
"""
# setup parameters "in time"
t0 = 0.0
dt = 0.001
Tend = 0.032
radii = {}
_, radius, exact_radius = run_implicit_Euler(t0=t0, dt=dt, Tend=Tend)
radii['implicit-Euler'] = radius
_, radius, exact_radius = run_imex_Euler(t0=t0, dt=dt, Tend=Tend)
radii['imex-Euler'] = radius
_, radius, exact_radius = run_CrankNicholson(t0=t0, dt=dt, Tend=Tend)
radii['CrankNicholson'] = radius
xcoords = [t0 + i * dt for i in range(int((Tend - t0) / dt))]
plot_radius(xcoords, exact_radius, radii)
def main_error(cwd=''):
t0 = 0
Tend = 0.032
errors = {}
# err, _, _ = run_implicit_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['implicit-Euler'] = err
# err, _, _ = run_imex_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['imex-Euler'] = err
err, _, _ = run_CrankNicholson(t0=t0, dt=0.001/64, Tend=Tend)
errors['CrankNicholson'] = err
if __name__ == "__main__":
main_error()
# main_radius()
| bsd-2-clause | 7,824,124,446,833,600,000 | 25.008772 | 113 | 0.594266 | false | 2.901174 | false | false | false |
mollie/mollie-api-python | mollie/api/objects/payment.py | 1 | 6068 | from .base import Base
class Payment(Base):
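    """Payment resource object wrapping a Mollie API payment; exposes its attributes as read-only properties."""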
@classmethod
def get_resource_class(cls, client):
from ..resources.payments import Payments
return Payments(client)
STATUS_OPEN = "open"
STATUS_PENDING = "pending"
STATUS_CANCELED = "canceled"
STATUS_EXPIRED = "expired"
STATUS_FAILED = "failed"
STATUS_PAID = "paid"
STATUS_AUTHORIZED = "authorized"
SEQUENCETYPE_ONEOFF = "oneoff"
SEQUENCETYPE_FIRST = "first"
SEQUENCETYPE_RECURRING = "recurring"
# Documented properties
@property
def resource(self):
return self._get_property("resource")
@property
def id(self):
return self._get_property("id")
@property
def mode(self):
return self._get_property("mode")
@property
def created_at(self):
return self._get_property("createdAt")
@property
def status(self):
return self._get_property("status")
@property
def is_cancelable(self):
return self._get_property("isCancelable")
@property
def authorized_at(self):
return self._get_property("authorizedAt")
@property
def paid_at(self):
return self._get_property("paidAt")
@property
def canceled_at(self):
return self._get_property("canceledAt")
@property
def expires_at(self):
return self._get_property("expiresAt")
@property
def expired_at(self):
return self._get_property("expiredAt")
@property
def failed_at(self):
return self._get_property("failedAt")
@property
def amount(self):
return self._get_property("amount")
@property
def amount_refunded(self):
return self._get_property("amountRefunded")
@property
def amount_remaining(self):
return self._get_property("amountRemaining")
@property
def description(self):
return self._get_property("description")
@property
def redirect_url(self):
return self._get_property("redirectUrl")
@property
def webhook_url(self):
return self._get_property("webhookUrl")
@property
def method(self):
return self._get_property("method")
@property
def metadata(self):
return self._get_property("metadata")
@property
def locale(self):
return self._get_property("locale")
@property
def country_code(self):
return self._get_property("countryCode")
@property
def profile_id(self):
return self._get_property("profileId")
@property
def settlement_amount(self):
return self._get_property("settlementAmount")
@property
def settlement_id(self):
return self._get_property("settlementId")
@property
def customer_id(self):
return self._get_property("customerId")
@property
def sequence_type(self):
return self._get_property("sequenceType")
@property
def mandate_id(self):
return self._get_property("mandateId")
@property
def subscription_id(self):
return self._get_property("subscriptionId")
@property
def order_id(self):
return self._get_property("orderId")
@property
def application_fee(self):
return self._get_property("applicationFee")
@property
def details(self):
return self._get_property("details")
# documented _links
@property
def checkout_url(self):
return self._get_link("checkout")
@property
def refunds(self):
"""Return the refunds related to this payment."""
return self.client.payment_refunds.on(self).list()
@property
def chargebacks(self):
"""Return the chargebacks related to this payment."""
return self.client.payment_chargebacks.on(self).list()
@property
def captures(self):
"""Return the captures related to this payment"""
return self.client.captures.on(self).list()
@property
def settlement(self):
"""Return the settlement for this payment."""
return self.client.settlements.get(self.settlement_id)
@property
def mandate(self):
"""Return the mandate for this payment."""
return self.client.customer_mandates.with_parent_id(self.customer_id).get(self.mandate_id)
@property
def subscription(self):
"""Return the subscription for this payment."""
return self.client.customer_subscriptions.with_parent_id(self.customer_id).get(self.subscription_id)
@property
def customer(self):
"""Return the customer for this payment."""
return self.client.customers.get(self.customer_id)
@property
def order(self):
"""Return the order for this payment."""
from ..resources.orders import Order
url = self._get_link("order")
if url:
resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url)
return Order(resp, self.client)
# additional methods
def is_open(self):
return self._get_property("status") == self.STATUS_OPEN
def is_pending(self):
return self._get_property("status") == self.STATUS_PENDING
def is_canceled(self):
return self._get_property("status") == self.STATUS_CANCELED
def is_expired(self):
return self._get_property("status") == self.STATUS_EXPIRED
def is_failed(self):
return self._get_property("status") == self.STATUS_FAILED
def is_authorized(self):
return self._get_property("status") == self.STATUS_AUTHORIZED
def is_paid(self):
return self._get_property("paidAt") is not None
def has_refunds(self):
return self._get_link("refunds") is not None
def can_be_refunded(self):
return self._get_property("amountRemaining") is not None
def has_sequence_type_first(self):
return self._get_property("sequenceType") == self.SEQUENCETYPE_FIRST
def has_sequence_type_recurring(self):
return self._get_property("sequenceType") == self.SEQUENCETYPE_RECURRING
| bsd-2-clause | -5,234,986,768,338,279,000 | 24.603376 | 108 | 0.6353 | false | 4.007926 | false | false | false |
andrecunha/idd3 | idd3/rules/universal/atomic_emitting_rulesets.py | 1 | 1614 | # -*- coding: utf-8 -*-
# IDD3 - Propositional Idea Density from Dependency Trees
# Copyright (C) 2014-2015 Andre Luiz Verucci da Cunha
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals, division
from idd3 import Ruleset
class AtomicEmittingRuleset(Ruleset):
"""A base ruleset for atomic relations that just emits the associated word
as a proposition."""
def extract(self, relations, index, context, engine, info={}):
engine.emit((relations[index].word,))
class NegRuleset(AtomicEmittingRuleset):
"""A ruleset that processes the 'neg' relation."""
rel = 'neg'
def extract(self, relations, index, context, engine, info={}):
engine.emit((relations[index].word,), 'M')
class DiscourseRuleset(AtomicEmittingRuleset):
"""A ruleset that processes the 'discourse' relation."""
rel = 'discourse'
def extract(self, relations, index, context, engine, info={}):
engine.emit((relations[index].word,), 'M')
| gpl-3.0 | -1,017,622,284,109,164,500 | 31.28 | 78 | 0.716853 | false | 3.898551 | false | false | false |
sfl-drupal/drupalizer | drush.py | 1 | 6916 | # coding: utf-8
#
# Copyright (C) 2016 Savoir-faire Linux Inc. (<www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from fabric.api import task, roles, env
from fabric.contrib.console import confirm
from fabric.colors import red, green
from fabric.utils import abort
from datetime import datetime
import helpers as h
import core as c
from git import isGitDirty
@task(alias='make')
@roles('local')
def make(action='install'):
"""
Build the platform by running the Makefile specified in the local_vars.py configuration file.
"""
if env.get('always_use_pty', True):
if (isGitDirty()):
if (not confirm(red('There are warnings on status of your repositories. '
'Do you want to continue and reset all changes to remote repositories'' states?'), default=False)):
abort('Aborting "drush {}" since there might be a risk of loosing local data.'.format(action))
drush_opts = "--prepare-install " if action != 'update' else ''
# Update profile codebase
if env.site_profile and env.site_profile != '':
drush_opts += "--contrib-destination=profiles/{} ".format(env.site_profile)
h.update_profile()
if not env.get('always_use_pty', True):
drush_opts += "--translations=" + env.site_languages + " "
elif confirm(red('Say [Y] to {} the site at {} with the specified translation(s): {}. If you say [n] '
'the site will be installed in English only'.format(action, env.site_root, env.site_languages))):
drush_opts += "--translations=" + env.site_languages + " "
if env.get('always_use_pty', True):
drush_opts += " --working-copy --no-gitinfofile"
if not h.fab_exists('local', env.site_root):
h.fab_run('local', "mkdir {}".format(env.site_root))
with h.fab_cd('local', env.site_root):
h.fab_run('local', 'drush make {} {} -y'.format(drush_opts, env.makefile))
@task
@roles('local')
def aliases():
"""
Copy conf/aliases.drushrc.php in the site environment.
"""
role = 'local'
drush_aliases = env.site_drush_aliases
workspace = env.workspace
if not h.fab_exists(role, drush_aliases):
h.fab_run(role, 'mkdir {}'.format(drush_aliases))
with h.fab_cd(role, drush_aliases):
# Create aliases
if h.fab_exists(role, '{}/aliases.drushrc.php'.format(drush_aliases)):
h.fab_run(role, 'rm {}/aliases.drushrc.php'.format(drush_aliases))
h.fab_run(role, 'cp {}/conf/aliases.drushrc.php .'.format(workspace))
print(green('Drush aliases have been copied to {} directory.'.format(drush_aliases)))
@task
@roles('docker')
def updatedb():
"""
Run the available database updates. Similar to drush updatedb.
"""
role = 'docker'
with h.fab_cd(role, env.docker_site_root):
h.fab_run(role, 'drush updatedb -y')
h.hook_execute(env.hook_post_update, role)
@task
@roles('docker')
def site_install():
"""
Run the site installation procedure.
"""
role = 'docker'
site_root = env.docker_site_root
apache = env.apache_user
profile = env.site_profile
db_user = env.site_db_user
db_pass = env.site_db_pass
db_host = env.site_db_host
db_name = env.site_db_name
site_name = env.site_name
site_admin_name = env.site_admin_user
site_admin_pass = env.site_admin_pass
site_subdir = env.site_subdir
# Create first the database if necessary
h.init_db('docker')
with h.fab_cd(role, site_root):
locale = '--locale="fr"' if env.locale else ''
h.fab_run(role, 'sudo -u {} drush site-install {} {} --db-url=mysql://{}:{}@{}/{} --site-name="{}" '
'--account-name={} --account-pass={} --sites-subdir={} -y'.format(apache, profile, locale,
db_user, db_pass,
db_host, db_name, site_name,
site_admin_name,
site_admin_pass,
site_subdir))
print(green('Site installed successfully!'))
# Import db_dump if it exists.
if 'db_dump' in env and env.db_dump is not False:
c.db_import(env.db_dump, role)
h.hook_execute(env.hook_post_install, role)
@task
@roles('docker')
def archive_dump(role='docker'):
"""
Archive the platform for release or deployment.
    :param role: Default role in which to run the task
"""
with h.fab_cd(role, env.docker_site_root):
platform = '{}-{}.tar.gz'.format(env.project_name, datetime.now().strftime('%Y%m%d_%H%M%S'))
h.fab_run(role, 'rm -f {}/build/*.tar.gz'.format(env.docker_workspace))
print(green('All tar.gz archives found in {}/build have been deleted.'.format(env.docker_workspace)))
h.fab_run(
role,
'drush archive-dump --destination={}/build/{} --tags="sflinux {}" --generatorversion="2.x" '
'--generator="Drupalizer::fab drush.archive_dump" --tar-options="--exclude=.git"'
''.format(env.docker_workspace, platform, env.project_name)
)
@task
@roles('docker')
def gen_doc(role='docker'):
"""
Generate README file
    :param role: Default role in which to run the task
"""
if h.fab_exists(role, '{}/README.adoc'.format(env.docker_workspace)):
h.fab_run(role, 'asciidoctor -d book -b html5 -o {}/README.html {}/README.adoc'.
format(env.docker_workspace, env.docker_workspace))
print(green('README.html generated in {}'.format(env.docker_workspace)))
if h.fab_exists(role, '{}/CHANGELOG.adoc'.format(env.docker_workspace)):
h.fab_run(role, 'asciidoctor -d book -b html5 -o {}/CHANGELOG.html {}/CHANGELOG.adoc'.
format(env.docker_workspace, env.docker_workspace))
print(green('CHANGELOG.html generated in {}'.format(env.docker_workspace)))
| gpl-3.0 | -5,361,245,927,559,476,000 | 37 | 123 | 0.596877 | false | 3.698396 | false | false | false |
floyd-fuh/afl-crash-analyzer | utilities/Logger.py | 1 | 2025 | #!/usr/bin/env python2.7
'''
AFL crash analyzer, crash triage for the American Fuzzy Lop fuzzer
Copyright (C) 2015 floyd
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on Apr 13, 2015
@author: floyd, http://floyd.ch, @floyd_ch
'''
import sys
class Logger():
#TODO: use curses, use colors, etc.
#min 0, max 10 (only used up to 6 atm)
debug_level = 6
    # Controls whether progress dots are printed so you can see the program is busy
busy_inform = debug_level <= 3 and False
@staticmethod
def setDebug(level):
Logger.debug_level = level
@staticmethod
def error(*text):
print "[-] Error: "+str(" ".join(str(i) for i in text))
@staticmethod
def warning(*text):
print " [-] Warning: "+str(" ".join(str(i) for i in text))
@staticmethod
def fatal(*text):
print "[-] Fatal Error: "+str(" ".join(str(i) for i in text))
exit()
@staticmethod
def info(*text):
print "[+] "+str(" ".join(str(i) for i in text))
@staticmethod
def debug(*text, **kwargs):
level = 2
if "debug_level" in kwargs:
level = kwargs["debug_level"]
if level <= Logger.debug_level:
print " ["+"+"*level+"] "+str(" ".join(str(i) for i in text))
@staticmethod
def busy():
if Logger.busy_inform:
sys.stdout.write(".")
sys.stdout.flush() | gpl-3.0 | -3,715,117,632,580,918,000 | 35.178571 | 90 | 0.628642 | false | 3.785047 | false | false | false |
Bitergia/allura | Allura/allura/ext/admin/admin_main.py | 1 | 36370 | import logging
from collections import defaultdict
from datetime import datetime
import pkg_resources
from pylons import c, g, request
from paste.deploy.converters import asbool
from tg import expose, redirect, flash, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from webob import exc
from bson import ObjectId
from allura.app import Application, DefaultAdminController, SitemapEntry
from allura.lib import helpers as h
from allura import version
from allura import model as M
from allura.lib.security import has_access, require_access
from allura.lib.widgets import form_fields as ffw
from allura.lib import exceptions as forge_exc
from allura.lib import plugin
from allura.controllers import BaseController
from allura.lib.decorators import require_post
from . import widgets as aw
from allura.lib.widgets.project_list import ProjectScreenshots
log = logging.getLogger(__name__)
class W:
markdown_editor = ffw.MarkdownEdit()
label_edit = ffw.LabelEdit()
mount_delete = ffw.Lightbox(name='mount_delete',trigger='a.mount_delete')
admin_modal = ffw.Lightbox(name='admin_modal',trigger='a.admin_modal')
install_modal = ffw.Lightbox(name='install_modal',trigger='a.install_trig')
explain_export_modal = ffw.Lightbox(name='explain_export',trigger='#why_export')
group_card = aw.GroupCard()
permission_card = aw.PermissionCard()
group_settings = aw.GroupSettings()
new_group_settings = aw.NewGroupSettings()
screenshot_admin = aw.ScreenshotAdmin()
screenshot_list = ProjectScreenshots()
metadata_admin = aw.MetadataAdmin()
audit = aw.AuditLog()
page_list=ffw.PageList()
class AdminApp(Application):
'''This is the admin app. It is pretty much required for
a functioning allura project.
'''
__version__ = version.__version__
installable=False
_installable_tools = None
tool_label = 'admin'
icons={
24:'images/admin_24.png',
32:'images/admin_32.png',
48:'images/admin_48.png'
}
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ProjectAdminController()
self.admin = AdminAppAdminController(self)
self.templates = pkg_resources.resource_filename('allura.ext.admin', 'templates')
self.sitemap = [ SitemapEntry('Admin','.')]
def is_visible_to(self, user):
'''Whether the user can view the app.'''
return has_access(c.project, 'create')(user=user)
@staticmethod
def installable_tools_for(project):
cls = AdminApp
if cls._installable_tools is None:
tools = [dict(name=k, app=v) for k,v in g.entry_points['tool'].iteritems()]
tools.sort(key=lambda t:(t['app'].status_int(), t['app'].ordinal))
cls._installable_tools = [ t for t in tools if t['app'].installable ]
return [ t for t in cls._installable_tools
if t['app'].status in project.allowed_tool_status ]
def main_menu(self):
'''Apps should provide their entries to be added to the main nav
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
'''
return [SitemapEntry('Admin', '.')]
@h.exceptionless([], log)
def sidebar_menu(self):
links = []
admin_url = c.project.url()+'admin/'
if c.project.is_nbhd_project:
links.append(SitemapEntry('Add Project', c.project.url()+'add_project', ui_icon=g.icons['plus']))
nbhd_admin_url = c.project.neighborhood.url()+'_admin/'
links = links + [
SitemapEntry('Neighborhood'),
SitemapEntry('Overview', nbhd_admin_url+'overview'),
SitemapEntry('Awards', nbhd_admin_url+'accolades')]
else:
links += [SitemapEntry('Metadata', admin_url+'overview'),]
if c.project.neighborhood.name != "Users":
links += [
SitemapEntry('Screenshots', admin_url+'screenshots'),
SitemapEntry('Categorization', admin_url+'trove')
]
links.append(SitemapEntry('Tools', admin_url+'tools'))
if c.project.is_root and has_access(c.project, 'admin')():
links.append(SitemapEntry('User Permissions', admin_url+'groups/'))
if not c.project.is_root and has_access(c.project, 'admin')():
links.append(SitemapEntry('Permissions', admin_url+'permissions/'))
if len(c.project.neighborhood_invitations):
links.append(SitemapEntry('Invitation(s)', admin_url+'invitations'))
links.append(SitemapEntry('Audit Trail', admin_url+ 'audit/'))
if c.project.is_nbhd_project:
links.append(SitemapEntry('Statistics', nbhd_admin_url+ 'stats/'))
links.append(None)
links.append(SitemapEntry('Help', nbhd_admin_url+ 'help/'))
return links
def admin_menu(self):
return []
def install(self, project):
pass
class ProjectAdminController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def __init__(self):
self.permissions = PermissionsController()
self.groups = GroupsController()
self.audit = AuditController()
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_admin.html')
def index(self, **kw):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_invitations.html')
def invitations(self):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_overview.html')
def overview(self, **kw):
c.markdown_editor = W.markdown_editor
c.metadata_admin = W.metadata_admin
c.explain_export_modal = W.explain_export_modal
show_export_control = asbool(config.get('show_export_control', False))
allow_project_delete = asbool(config.get('allow_project_delete', True))
explain_export_text = '''The purpose of this section is to determine whether your project is subject to the provisions of the
US Export Administration Regulations. You should consult section 734.4 and Supplement 2 to Part 734 for information on such items
and the calculation of U.S. controlled content.
<a href="http://www.bis.doc.gov/encryption/default.htm" target="_blank">http://www.bis.doc.gov/encryption/default.htm</a>'''
if 'us_export_contact' in config:
explain_export_text += 'If you have additional questions, please contact <a href="mailto:{contact}">{contact}</a>.'.format(
contact=config['us_export_contact']
)
return dict(show_export_control=show_export_control,
allow_project_delete=allow_project_delete,
explain_export_text=explain_export_text)
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_screenshots.html')
def screenshots(self, **kw):
c.screenshot_admin = W.screenshot_admin
c.screenshot_list = W.screenshot_list
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_trove.html')
def trove(self):
c.label_edit = W.label_edit
base_troves = M.TroveCategory.query.find(dict(trove_parent_id=0)).sort('fullname').all()
topic_trove = M.TroveCategory.query.get(trove_parent_id=0,shortname='topic')
license_trove = M.TroveCategory.query.get(trove_parent_id=0,shortname='license')
return dict(base_troves=base_troves,license_trove=license_trove,topic_trove=topic_trove)
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_tools.html')
def tools(self, **kw):
c.markdown_editor = W.markdown_editor
c.label_edit = W.label_edit
c.mount_delete = W.mount_delete
c.admin_modal = W.admin_modal
c.install_modal = W.install_modal
mounts = c.project.ordered_mounts()
return dict(
mounts=mounts,
installable_tools=AdminApp.installable_tools_for(c.project),
roles=M.ProjectRole.query.find(dict(project_id=c.project.root_project._id)).sort('_id').all(),
categories=M.ProjectCategory.query.find(dict(parent_id=None)).sort('label').all())
@expose()
@require_post()
def update_labels(self, labels=None, labels_old=None, **kw):
require_access(c.project, 'admin')
c.project.labels = labels.split(',')
M.AuditLog.log('updated labels')
redirect('trove')
@without_trailing_slash
@expose()
def clone(self,
repo_type=None, source_url=None,
mount_point=None, mount_label=None,
**kw):
require_access(c.project, 'admin')
if repo_type is None:
return (
'<form method="get">'
'<input name="repo_type" value="Git">'
'<input name="source_url">'
'<input type="submit">'
'</form>')
for ep in pkg_resources.iter_entry_points('allura', repo_type):
break
if ep is None or source_url is None:
raise exc.HTTPNotFound
h.log_action(log, 'install tool').info(
'clone repo from %s', source_url,
meta=dict(tool_type=repo_type, mount_point=mount_point, mount_label=mount_label))
c.project.install_app(
repo_type,
mount_point=mount_point,
mount_label=mount_label,
init_from_url=source_url)
M.AuditLog.log('Create repo as clone')
redirect('tools')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def groups(self, **kw):
return dict()
@expose()
def _lookup(self, name, *remainder):
app = c.project.app_instance(name)
if app is None:
raise exc.HTTPNotFound, name
return app.admin, remainder
@expose()
@require_post()
@validate(W.metadata_admin, error_handler=overview)
def update(self, name=None,
short_description=None,
summary='',
icon=None,
category=None,
external_homepage='',
support_page='',
support_page_url='',
removal='',
moved_to_url='',
export_controlled=False,
export_control_type=None,
tracking_id='',
**kw):
require_access(c.project, 'update')
if removal != c.project.removal:
M.AuditLog.log('change project removal status to %s', removal)
h.log_action(log, 'change project removal status').info('')
c.project.removal = removal
c.project.removal_changed_date = datetime.utcnow()
if 'delete_icon' in kw:
M.ProjectFile.query.remove(dict(project_id=c.project._id, category='icon'))
M.AuditLog.log('remove project icon')
h.log_action(log, 'remove project icon').info('')
g.post_event('project_updated')
redirect('overview')
elif 'delete' in kw:
allow_project_delete = asbool(config.get('allow_project_delete', True))
if allow_project_delete or not c.project.is_root:
M.AuditLog.log('delete project')
h.log_action(log, 'delete project').info('')
plugin.ProjectRegistrationProvider.get().delete_project(c.project, c.user)
redirect('overview')
elif 'undelete' in kw:
h.log_action(log, 'undelete project').info('')
M.AuditLog.log('undelete project')
plugin.ProjectRegistrationProvider.get().undelete_project(c.project, c.user)
redirect('overview')
if name != c.project.name:
h.log_action(log, 'change project name').info('')
M.AuditLog.log('change project name to %s', name)
c.project.name = name
if short_description != c.project.short_description:
h.log_action(log, 'change project short description').info('')
M.AuditLog.log('change short description to %s', short_description)
c.project.short_description = short_description
if summary != c.project.summary:
h.log_action(log, 'change project summary').info('')
M.AuditLog.log('change summary to %s', summary)
c.project.summary = summary
category = category and ObjectId(category) or None
if category != c.project.category_id:
h.log_action(log, 'change project category').info('')
M.AuditLog.log('change category to %s', category)
c.project.category_id = category
if external_homepage != c.project.external_homepage:
h.log_action(log, 'change external home page').info('')
M.AuditLog.log('change external home page to %s', external_homepage)
c.project.external_homepage = external_homepage
if support_page != c.project.support_page:
h.log_action(log, 'change project support page').info('')
M.AuditLog.log('change project support page to %s', support_page)
c.project.support_page = support_page
if support_page_url != c.project.support_page_url:
h.log_action(log, 'change project support page url').info('')
M.AuditLog.log('change project support page url to %s', support_page_url)
c.project.support_page_url = support_page_url
if moved_to_url != c.project.moved_to_url:
h.log_action(log, 'change project moved to url').info('')
M.AuditLog.log('change project moved to url to %s', moved_to_url)
c.project.moved_to_url = moved_to_url
if export_controlled != c.project.export_controlled:
h.log_action(log, 'change project export controlled status').info('')
M.AuditLog.log('change project export controlled status to %s', export_controlled)
c.project.export_controlled = not not export_controlled
if not export_controlled:
export_control_type = None
if export_control_type != c.project.export_control_type:
h.log_action(log, 'change project export control type').info('')
M.AuditLog.log('change project export control type to %s', export_control_type)
c.project.export_control_type = export_control_type
if tracking_id != c.project.tracking_id:
h.log_action(log, 'change project tracking ID').info('')
M.AuditLog.log('change project tracking ID to %s', tracking_id)
c.project.tracking_id = tracking_id
if icon is not None and icon != '':
if c.project.icon:
M.ProjectFile.remove(dict(project_id=c.project._id, category='icon'))
M.AuditLog.log('update project icon')
M.ProjectFile.save_image(
icon.filename, icon.file, content_type=icon.type,
square=True, thumbnail_size=(48,48),
thumbnail_meta=dict(project_id=c.project._id,category='icon'))
g.post_event('project_updated')
redirect('overview')
def _add_trove(self, type, new_trove):
current_troves = getattr(c.project,'trove_%s'%type)
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(new_trove))
error_msg = None
if type in ['license','audience','developmentstatus','language'] and len(current_troves) >= 6:
error_msg = 'You may not have more than 6 of this category.'
elif type in ['topic'] and len(current_troves) >= 3:
error_msg = 'You may not have more than 3 of this category.'
elif trove_obj is not None:
if trove_obj._id not in current_troves:
current_troves.append(trove_obj._id)
g.post_event('project_updated')
else:
error_msg = 'This category has already been assigned to the project.'
return (trove_obj, error_msg)
@expose('json:')
@require_post()
def add_trove_js(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
return dict(trove_full_path = trove_obj.fullpath, trove_cat_id = trove_obj.trove_cat_id, error_msg=error_msg)
redirect('trove')
@expose()
@require_post()
def add_trove(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
M.AuditLog.log('add trove %s: %s', type, trove_obj.fullpath)
if error_msg:
flash(error_msg,'error')
redirect('trove')
@expose()
@require_post()
def delete_trove(self, type, trove, **kw):
require_access(c.project, 'update')
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(trove))
current_troves = getattr(c.project,'trove_%s'%type)
if trove_obj is not None and trove_obj._id in current_troves:
M.AuditLog.log('remove trove %s: %s', type, trove_obj.fullpath)
current_troves.remove(trove_obj._id)
g.post_event('project_updated')
redirect('trove')
@expose()
@require_post()
@validate(W.screenshot_admin)
def add_screenshot(self, screenshot=None, caption=None, **kw):
require_access(c.project, 'update')
if len(c.project.get_screenshots()) >= 6:
flash('You may not have more than 6 screenshots per project.','error')
elif screenshot is not None and screenshot != '':
M.AuditLog.log('add screenshot')
M.ProjectFile.save_image(
screenshot.filename, screenshot.file, content_type=screenshot.type,
save_original=True,
original_meta=dict(project_id=c.project._id,category='screenshot',caption=caption),
square=True, thumbnail_size=(150,150),
thumbnail_meta=dict(project_id=c.project._id,category='screenshot_thumb'))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def delete_screenshot(self, id=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.AuditLog.log('remove screenshot')
M.ProjectFile.query.remove(dict(project_id=c.project._id, _id=ObjectId(id)))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def edit_screenshot(self, id=None, caption=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.ProjectFile.query.get(project_id=c.project._id, _id=ObjectId(id)).caption=caption
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def join_neighborhood(self, nid):
require_access(c.project, 'admin')
if not nid:
n = M.Neighborhood.query.get(name='Projects')
c.project.neighborhood_id = n._id
flash('Joined %s' % n.name)
redirect(c.project.url() + 'admin/')
nid = ObjectId(str(nid))
if nid not in c.project.neighborhood_invitations:
flash('No invitation to that neighborhood', 'error')
redirect('.')
c.project.neighborhood_id = nid
n = M.Neighborhood.query.get(_id=nid)
flash('Joined %s' % n.name)
redirect('invitations')
@h.vardec
@expose()
@require_post()
def update_mount_order(self, subs=None, tools=None, **kw):
if subs:
for sp in subs:
p = M.Project.query.get(shortname=sp['shortname'],
neighborhood_id=c.project.neighborhood_id)
p.ordinal = int(sp['ordinal'])
if tools:
for p in tools:
c.project.app_config(p['mount_point']).options.ordinal = int(p['ordinal'])
redirect('tools')
@h.vardec
@expose()
@require_post()
def update_mounts(self, subproject=None, tool=None, new=None, **kw):
if subproject is None: subproject = []
if tool is None: tool = []
for sp in subproject:
p = M.Project.query.get(shortname=sp['shortname'],
neighborhood_id=c.project.neighborhood_id)
if sp.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('delete subproject %s', sp['shortname'])
h.log_action(log, 'delete subproject').info(
'delete subproject %s', sp['shortname'],
meta=dict(name=sp['shortname']))
p.removal = 'deleted'
plugin.ProjectRegistrationProvider.get().delete_project(p, c.user)
elif not new:
M.AuditLog.log('update subproject %s', sp['shortname'])
p.name = sp['name']
p.ordinal = int(sp['ordinal'])
for p in tool:
if p.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('uninstall tool %s', p['mount_point'])
h.log_action(log, 'uninstall tool').info(
'uninstall tool %s', p['mount_point'],
meta=dict(mount_point=p['mount_point']))
c.project.uninstall_app(p['mount_point'])
elif not new:
M.AuditLog.log('update tool %s', p['mount_point'])
options = c.project.app_config(p['mount_point']).options
options.mount_label = p['mount_label']
options.ordinal = int(p['ordinal'])
try:
if new and new.get('install'):
ep_name = new.get('ep_name', None)
if not ep_name:
require_access(c.project, 'create')
mount_point = new['mount_point'].lower() or h.nonce()
M.AuditLog.log('create subproject %s', mount_point)
h.log_action(log, 'create subproject').info(
'create subproject %s', mount_point,
meta=dict(mount_point=mount_point,name=new['mount_label']))
sp = c.project.new_subproject(mount_point)
sp.name = new['mount_label']
sp.ordinal = int(new['ordinal'])
else:
require_access(c.project, 'admin')
mount_point = new['mount_point'].lower() or ep_name.lower()
M.AuditLog.log('install tool %s', mount_point)
h.log_action(log, 'install tool').info(
'install tool %s', mount_point,
meta=dict(tool_type=ep_name, mount_point=mount_point, mount_label=new['mount_label']))
c.project.install_app(ep_name, mount_point, mount_label=new['mount_label'], ordinal=new['ordinal'])
except forge_exc.ForgeError, exc:
flash('%s: %s' % (exc.__class__.__name__, exc.args[0]),
'error')
g.post_event('project_updated')
redirect('tools')
class PermissionsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def index(self, **kw):
c.card = W.permission_card
return dict(permissions=self._index_permissions())
@without_trailing_slash
@expose()
@h.vardec
@require_post()
def update(self, card=None, **kw):
permissions = self._index_permissions()
old_permissions = dict(permissions)
for args in card:
perm = args['id']
new_group_ids = args.get('new', [])
group_ids = args.get('value', [])
if isinstance(new_group_ids, basestring):
new_group_ids = [ new_group_ids ]
if isinstance(group_ids, basestring):
group_ids = [ group_ids ]
# make sure the admin group has the admin permission
if perm == 'admin':
if c.project.is_root:
pid = c.project._id
else:
pid = c.project.parent_id
admin_group_id = str(M.ProjectRole.query.get(project_id=pid, name='Admin')._id)
if admin_group_id not in group_ids + new_group_ids:
flash('You cannot remove the admin group from the admin permission.','warning')
group_ids.append(admin_group_id)
permissions[perm] = []
role_ids = map(ObjectId, group_ids + new_group_ids)
permissions[perm] = role_ids
c.project.acl = []
for perm, role_ids in permissions.iteritems():
role_names = lambda ids: ','.join(sorted(
pr.name for pr in M.ProjectRole.query.find(dict(_id={'$in':ids}))))
old_role_ids = old_permissions.get(perm, [])
if old_role_ids != role_ids:
M.AuditLog.log('updated "%s" permissions: "%s" => "%s"',
perm,role_names(old_role_ids), role_names(role_ids))
c.project.acl += [M.ACE.allow(rid, perm) for rid in role_ids]
g.post_event('project_updated')
redirect('.')
def _index_permissions(self):
permissions = dict(
(p,[]) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
class GroupsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def _index_permissions(self):
permissions = dict(
(p,[]) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
def _map_group_permissions(self):
roles = c.project.named_roles
permissions=self._index_permissions()
permissions_by_role = dict()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
for role in roles+[auth_role, anon_role]:
permissions_by_role[str(role._id)] = []
for perm in permissions:
perm_info = dict(has="no", text="Does not have permission %s" % perm, name=perm)
role_ids = permissions[perm]
if role._id in role_ids:
perm_info['text'] = "Has permission %s" % perm
perm_info['has'] = "yes"
else:
for r in role.child_roles():
if r._id in role_ids:
perm_info['text'] = "Inherited permission %s from %s" % (perm, r.name)
perm_info['has'] = "inherit"
break
if perm_info['has'] == "no":
if anon_role._id in role_ids:
perm_info['text'] = "Inherited permission %s from Anonymous" % perm
perm_info['has'] = "inherit"
elif auth_role._id in role_ids and role != anon_role:
perm_info['text'] = "Inherited permission %s from Authenticated" % perm
perm_info['has'] = "inherit"
permissions_by_role[str(role._id)].append(perm_info)
return permissions_by_role
@without_trailing_slash
@expose()
@h.vardec
def delete_group(self, group_name, **kw):
role = M.ProjectRole.by_name(group_name)
if not role:
flash('Group "%s" does not exist.' % group_name, 'error')
else:
role.delete()
M.AuditLog.log('delete group %s', group_name)
flash('Group "%s" deleted successfully.' % group_name)
g.post_event('project_updated')
redirect('.')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_groups.html')
def index(self, **kw):
c.admin_modal = W.admin_modal
c.card = W.group_card
permissions_by_role = self._map_group_permissions()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
roles = c.project.named_roles
roles.append(None)
return dict(roles=roles, permissions_by_role=permissions_by_role,
auth_role=auth_role, anon_role=anon_role)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def change_perm(self, role_id, permission, allow="true", **kw):
if allow=="true":
M.AuditLog.log('granted permission %s to group with id %s', permission, role_id)
c.project.acl.append(M.ACE.allow(ObjectId(role_id), permission))
else:
admin_group_id = str(M.ProjectRole.by_name('Admin')._id)
if admin_group_id == role_id and permission == 'admin':
return dict(error='You cannot remove the admin permission from the admin group.')
M.AuditLog.log('revoked permission %s from group with id %s', permission, role_id)
c.project.acl.remove(M.ACE.allow(ObjectId(role_id), permission))
g.post_event('project_updated')
return self._map_group_permissions()
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def add_user(self, role_id, username, **kw):
if not username or username=='*anonymous':
return dict(error='You must choose a user to add.')
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.by_username(username.strip())
if not group:
return dict(error='Could not find group with id %s' % role_id)
if not user:
return dict(error='User %s not found' % username)
if group._id in user.project_role().roles:
return dict(error='%s (%s) is already in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('add user %s to %s', username, group.name)
user.project_role().roles.append(group._id)
g.post_event('project_updated')
return dict(username=username, displayname=user.display_name)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def remove_user(self, role_id, username, **kw):
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.by_username(username.strip())
if group.name == 'Admin' and len(group.users_with_role()) == 1:
return dict(error='You must have at least one user with the Admin role.')
if not group:
return dict(error='Could not find group with id %s' % role_id)
if not user:
return dict(error='User %s not found' % username)
if group._id not in user.project_role().roles:
return dict(error='%s (%s) is not in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('remove user %s from %s', username, group.name)
user.project_role().roles.remove(group._id)
g.post_event('project_updated')
return dict()
@without_trailing_slash
@expose()
@require_post()
@h.vardec
def update(self, card=None, **kw):
for pr in card:
group = M.ProjectRole.query.get(_id=ObjectId(pr['id']))
assert group.project == c.project, 'Security violation'
user_ids = pr.get('value', [])
new_users = pr.get('new', [])
if isinstance(user_ids, basestring):
user_ids = [ user_ids ]
if isinstance(new_users, basestring):
new_users = [ new_users ]
# Handle new users in groups
user_added = False
for username in new_users:
user = M.User.by_username(username.strip())
if not user:
flash('User %s not found' % username, 'error')
redirect('.')
if not user._id:
continue # never add anon users to groups
M.AuditLog.log('add user %s to %s', username, group.name)
user.project_role().roles.append(group._id)
user_added = True
# Make sure we aren't removing all users from the Admin group
if group.name == u'Admin' and not (user_ids or user_added):
flash('You must have at least one user with the Admin role.',
'warning')
redirect('.')
# Handle users removed from groups
user_ids = set(
uid and ObjectId(uid)
for uid in user_ids)
for role in M.ProjectRole.query.find(dict(user_id={'$ne':None}, roles=group._id)):
if role.user_id and role.user_id not in user_ids:
role.roles = [ rid for rid in role.roles if rid != group._id ]
M.AuditLog.log('remove user %s from %s', role.user.username, group.name)
g.post_event('project_updated')
redirect('.')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def new(self):
c.form = W.new_group_settings
return dict(
group=None,
show_settings=True,
action="create")
@expose()
@require_post()
@validate(W.new_group_settings)
@h.vardec
def create(self, name=None, **kw):
if M.ProjectRole.by_name(name):
flash('%s already exists' % name, 'error')
else:
M.ProjectRole(project_id=c.project._id, name=name)
M.AuditLog.log('create group %s', name)
g.post_event('project_updated')
redirect('.')
@expose()
def _lookup(self, name, *remainder):
return GroupController(name), remainder
class GroupController(BaseController):
def __init__(self, name):
self._group = M.ProjectRole.query.get(_id=ObjectId(name))
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def index(self):
if self._group.name in ('Admin', 'Developer', 'Member'):
show_settings = False
action = None
else:
show_settings = True
action = self._group.settings_href + 'update'
c.form = W.group_settings
return dict(
group=self._group,
show_settings=show_settings,
action=action)
@expose()
@h.vardec
@require_post()
@validate(W.group_settings)
def update(self, _id=None, delete=None, name=None, **kw):
pr = M.ProjectRole.by_name(name)
if pr and pr._id != _id._id:
flash('%s already exists' % name, 'error')
redirect('..')
if delete:
_id.delete()
M.AuditLog.log('delete group %s', _id.name)
flash('%s deleted' % name)
redirect('..')
M.AuditLog.log('update group name %s=>%s', _id.name, name)
_id.name = name
flash('%s updated' % name)
redirect('..')
class AuditController(BaseController):
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/audit.html')
def index(self, limit=10, page=0, **kwargs):
limit = int(limit)
page = int(page)
count = M.AuditLog.query.find(dict(project_id=c.project._id)).count()
q = M.AuditLog.query.find(dict(project_id=c.project._id))
q = q.sort('timestamp', -1)
q = q.skip(page * limit)
if count > limit:
q = q.limit(limit)
else:
limit=count
c.widget = W.audit
return dict(
entries=q.all(),
limit=limit,
page=page,
count=count)
class AdminAppAdminController(DefaultAdminController):
'''Administer the admin app'''
pass
| apache-2.0 | -1,951,223,242,091,528,700 | 42.143535 | 137 | 0.581743 | false | 3.840144 | false | false | false |
bat-serjo/vivisect | vqt/application.py | 1 | 6519 | import os
import logging
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
import vqt.cli as vq_cli
import vqt.main as vq_main
import vqt.saveable as vq_save
import vqt.hotkeys as vq_hotkeys
import vqt.menubuilder as vq_menu
from vqt.saveable import compat_isNone
logger = logging.getLogger(__name__)
class VQDockWidget(vq_hotkeys.HotKeyMixin, QDockWidget):
def __init__(self, parent):
QDockWidget.__init__(self, parent)
vq_hotkeys.HotKeyMixin.__init__(self)
self.addHotKey('ctrl+enter', 'mem:undockmaximize')
self.addHotKeyTarget('mem:undockmaximize', self._hotkey_undock_maximize)
self.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
def vqSaveState(self, settings, name, stub=''):
wid = self.widget()
if isinstance(wid, vq_save.SaveableWidget):
return wid.vqSaveState(settings, name, stub)
def vqRestoreState(self, settings, name, stub=''):
wid = self.widget()
if isinstance(wid, vq_save.SaveableWidget):
return wid.vqRestoreState(settings, name, stub)
def setWidget(self, widget):
        # Mirror the child widget's window title, and keep it in sync by routing
        # the widget's future setWindowTitle calls through this dock widget.
self.setWindowTitle(widget.windowTitle())
widget.setWindowTitle = self.setWindowTitle
QDockWidget.setWidget(self, widget)
def closeEvent(self, event):
self.hide()
w = self.widget()
w.setParent(None)
w.close()
self.parent().vqRemoveDockWidget(self)
event.accept()
def _hotkey_undock_maximize(self):
# if docked, undock
if not self.isFloating():
self.setFloating(1)
# if not maximized, maximize
if not self.isMaximized():
self.showMaximized()
else:
# else dock
self.showNormal()
self.setFloating(False)
else:
# else dock
self.showNormal()
self.setFloating(False)
self.show()
self.raise_()
class VQMainCmdWindow(vq_hotkeys.HotKeyMixin, QMainWindow):
'''
    A base class for application windows to inherit from.
'''
__cli_widget_class__ = vq_cli.VQCli
def __init__(self, appname, cmd, **kwargs):
super(QMainWindow, self).__init__(**kwargs)
vq_hotkeys.HotKeyMixin.__init__(self)
self._vq_appname = appname
self._vq_dockwidgets = []
self._vq_settings = QtCore.QSettings('invisigoth', application=appname, parent=self)
self._vq_histfile = os.path.join(os.path.expanduser('~'), '.%s_history' % appname)
self._dock_classes = {}
self.vqInitDockWidgetClasses()
self._vq_mbar = vq_menu.VQMenuBar()
self.setMenuBar(self._vq_mbar)
# AnimatedDocks, AllowNestedDocks, AllowTabbedDocks, ForceTabbedDocks, VerticalTabs
self.setDockOptions(self.AnimatedDocks | self.AllowTabbedDocks)
self._vq_cli = self.__cli_widget_class__(cmd)
self._vq_cli.input.loadHistory(self._vq_histfile)
self._vq_cli.sigCliQuit.connect( self.close )
self.setCentralWidget(self._vq_cli)
self.vqRestoreGuiSettings(self._vq_settings)
def vqAddMenuField(self, fname, callback, args=()):
self._vq_mbar.addField(fname, callback, args=args)
def vqAddDynMenu(self, fname, callback):
self._vq_mbar.addDynMenu(fname, callback)
def vqInitDockWidgetClasses(self):
# apps can over-ride
pass
def vqAddDockWidgetClass(self, cls, args=()):
self._dock_classes[cls.__name__] = (cls, args)
def vqBuildDockWidget(self, clsname, floating=False, area=QtCore.Qt.TopDockWidgetArea):
res = self._dock_classes.get(clsname)
if res is None:
logger.error('vqBuildDockWidget Failed For: %s', clsname)
return
cls, args = res
obj = cls(*args)
return self.vqDockWidget(obj, area, floating=floating), obj
def vqRestoreGuiSettings(self, settings, stub=''):
dwcls = settings.value('DockClasses')
if not compat_isNone(dwcls):
for i, clsname in enumerate(dwcls):
name = 'VQDockWidget%d' % i
try:
tup = self.vqBuildDockWidget(str(clsname), floating=False)
if tup is not None:
d, obj = tup
d.setObjectName(name)
d.vqRestoreState(settings, name, stub)
d.show()
except Exception as e:
logger.error('Error Building: %s: %s', clsname, e)
# Once dock widgets are loaded, we can restoreState
state = settings.value('DockState')
if not compat_isNone(state):
self.restoreState(state)
geom = settings.value('DockGeometry')
if not compat_isNone(geom):
self.restoreGeometry(geom)
# Just get all the resize activities done...
vq_main.eatevents()
for w in self.vqGetDockWidgets():
w.show()
return True
def vqSaveGuiSettings(self, settings, stub=''):
dock_classes = []
# Enumerate the current dock windows and set
# their names by their list order...
for i, w in enumerate(self.vqGetDockWidgets()):
widget = w.widget()
dock_classes.append(widget.__class__.__name__)
name = 'VQDockWidget%d' % i
w.setObjectName(name)
w.vqSaveState(settings,name,stub)
settings.setValue('DockClasses', dock_classes)
settings.setValue('DockGeometry', self.saveGeometry())
settings.setValue('DockState', self.saveState())
def closeEvent(self, event):
self.vqSaveGuiSettings(self._vq_settings)
self._vq_cli.input.saveHistory(self._vq_histfile)
QMainWindow.closeEvent(self, event)
def vqGetDockWidgets(self):
return list(self._vq_dockwidgets)
def vqClearDockWidgets(self):
for wid in self.vqGetDockWidgets():
wid.close()
def vqRemoveDockWidget(self, widget):
self._vq_dockwidgets.remove(widget)
self.removeDockWidget(widget)
def vqDockWidget(self, widget, area=QtCore.Qt.TopDockWidgetArea, floating=False):
d = VQDockWidget(self)
d.setWidget(widget)
d.setFloating(floating)
self.addDockWidget(area, d)
self._vq_dockwidgets.append(d)
self.restoreDockWidget(d)
d.show()
return d
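# A minimal subclass sketch (class and app names below are illustrative and not
# part of this module): override vqInitDockWidgetClasses to register dock
# widget classes, then build and show them by name at runtime.
#
# class MyMainWindow(VQMainCmdWindow):
#
#     def vqInitDockWidgetClasses(self):
#         self.vqAddDockWidgetClass(MyDockWidget)
#
# win = MyMainWindow('myapp', my_cmd_object)
# win.vqBuildDockWidget('MyDockWidget', floating=False)
# win.show()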
| apache-2.0 | -1,507,048,536,738,605,300 | 31.272277 | 92 | 0.609296 | false | 3.650056 | false | false | false |
ioam/param | tests/API1/testparamdepends.py | 1 | 2200 | """
Unit test for param.depends.
"""
import param
from . import API1TestCase
class TestParamDepends(API1TestCase):
def setUp(self):
class P(param.Parameterized):
a = param.Parameter()
b = param.Parameter()
@param.depends('a')
def single_parameter(self):
pass
@param.depends('a:constant')
def constant(self):
pass
@param.depends('a.param')
def nested(self):
pass
self.P = P
def test_param_depends_instance(self):
p = self.P()
pinfos = p.param.params_depended_on('single_parameter')
self.assertEqual(len(pinfos), 1)
pinfo = pinfos[0]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, p)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'value')
def test_param_depends_class(self):
pinfos = self.P.param.params_depended_on('single_parameter')
self.assertEqual(len(pinfos), 1)
pinfo = pinfos[0]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, None)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'value')
def test_param_depends_constant(self):
pinfos = self.P.param.params_depended_on('constant')
self.assertEqual(len(pinfos), 1)
pinfo = pinfos[0]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, None)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'constant')
def test_param_depends_nested(self):
inst = self.P(a=self.P())
pinfos = inst.param.params_depended_on('nested')
self.assertEqual(len(pinfos), 4)
pinfos = {(pi.inst, pi.name): pi for pi in pinfos}
pinfo = pinfos[(inst, 'a')]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, inst)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'value')
for p in ['name', 'a', 'b']:
info = pinfos[(inst.a, p)]
self.assertEqual(info.name, p)
self.assertIs(info.inst, inst.a)
| bsd-3-clause | 8,995,908,249,948,755,000 | 29.555556 | 68 | 0.572727 | false | 3.481013 | true | false | false |
google/eclipse2017 | scripts/get_user_ids.py | 1 | 1785 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print user ids matching email addresses."""
import argparse
from google.cloud import datastore
import common.service_account as sa
DEFAULT_PROJECT_ID = 'eclipse-2017-test-147301'
DEFAULT_EMAIL_ADDRESS_FILE = 'email_addresses.txt'
def get_arguments():
parser = argparse.ArgumentParser(description='Print user ids matching email addresses.')
parser.add_argument('--project_id', type=str, default=DEFAULT_PROJECT_ID)
parser.add_argument('--email_address_file', type=str, default=DEFAULT_EMAIL_ADDRESS_FILE)
return parser.parse_args()
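# Example invocation (project id and file name are placeholders):
#
#   python get_user_ids.py --project_id my-eclipse-project \
#       --email_address_file email_addresses.txt
#
# The input file is expected to contain one email address per line; matching
# Datastore User entities are printed as "<key name> <email>".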
def main():
args = get_arguments()
client = datastore.Client(project=args.project_id)
addresses = [address.strip() for address in open(args.email_address_file).readlines()]
# Can't find a way to query a collection of records matching different email addresses.
for email in addresses:
query = client.query(kind="User")
query.add_filter('email', '=', email)
entities = query.fetch()
l = list(entities)
if l == []:
print "No match for", email
else:
for entity in l:
print entity.key.name, entity['email']
if __name__ == '__main__':
main()
| apache-2.0 | 7,845,751,704,223,151,000 | 34.7 | 93 | 0.693557 | false | 3.993289 | false | false | false |
sebinthomas/pyvarnam | pyvarnam/varnam_defs.py | 1 | 3346 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Varnam library functions list
"""
# Varnam library functions list
from .utils import *
import ctypes as C
#REMINDER: Change this for every major release of varnam
LIBVARNAM_MAJOR_VERSION = 3
VARNAM_PATHS = ['','..','/usr/local/lib', '/usr/local/lib/i386-linux-gnu', '/usr/local/lib/x86_64-linux-gnu', '/usr/lib/i386-linux-gnu', '/usr/lib/x86_64-linux-gnu', '/usr/lib']
VARNAM_NAMES = ['libvarnam.so', "libvarnam.so.{0}".format(LIBVARNAM_MAJOR_VERSION), 'libvarnam.dylib', 'varnam.dll']
class VarnamHandle(C.Structure):
_fields_ = [('scheme_file', STRING),
('suggestions_file', STRING),
('internal', VOID)]
VARNAM_PTR = C.POINTER(VarnamHandle)
class Varray(C.Structure):
_fields_ = [('memory', C.POINTER(VOID)),
('allocated', C.c_size_t),
('used', C.c_size_t),
('index', INT)]
VARRAY_PTR = C.POINTER(Varray)
class VlearnStatus(C.Structure):
_fields_ = [('total_words', INT),
('failed', INT)]
VLEARN_STATUS_PTR = C.POINTER(VlearnStatus)
#TODO: do we need this ?
class Token(C.Structure):
_fields_ = [('id', INT),
('type', INT),
('match_type', INT),
('priority', INT),
('accept_condition', INT),
('flags', INT),
('tag', STRING),
('pattern', STRING),
('value1', STRING),
('value2', STRING),
('value3', STRING)]
class Word(C.Structure):
_fields_ = [('text', STRING),
('confidence', INT)]
FUNCTION_LIST = [
['varnam_init', [STRING, C.POINTER(VARNAM_PTR), C.POINTER(STRING)], INT],
['varnam_init_from_id', [STRING, C.POINTER(VARNAM_PTR), C.POINTER(STRING)], INT],
['varnam_version', [], STRING],
['varnam_transliterate', [VARNAM_PTR, STRING, C.POINTER(VARRAY_PTR)], INT],
['varnam_reverse_transliterate', [VARNAM_PTR, STRING, C.POINTER(STRING)], INT],
['varnam_detect_lang', [VARNAM_PTR, STRING], INT],
['varnam_learn', [VARNAM_PTR, STRING], INT],
['varnam_train', [VARNAM_PTR, STRING, STRING], INT],
['varnam_learn_from_file', [VARNAM_PTR, STRING, VLEARN_STATUS_PTR, VOID, VOID], INT],
['varnam_create_token', [VARNAM_PTR, STRING, STRING, STRING, STRING, STRING, INT, INT, INT, INT, INT], INT],
['varnam_set_scheme_details', [VARNAM_PTR, STRING, STRING, STRING, STRING, STRING], INT],
['varnam_get_last_error', [VARNAM_PTR], STRING],
['varnam_flush_buffer', [VARNAM_PTR], INT],
['varnam_config', [], INT],
['varnam_get_all_tokens', [VARNAM_PTR, INT, C.POINTER(VARRAY_PTR)], INT],
['varray_get', [VARRAY_PTR, INT], VOID],
['varray_length', [VARRAY_PTR], INT],
['varnam_export_words', [VARNAM_PTR, INT, STRING, INT, VOID], INT],
['varnam_import_learnings_from_file', [VARNAM_PTR, STRING, VOID], INT],
['varnam_destroy', [VARNAM_PTR], VOID],
['varnam_get_scheme_file', [VARNAM_PTR], STRING],
    ['varnam_get_suggestions_file', [VARNAM_PTR], STRING]]
# TODO: varnam_learn_from_file uses a callback. So does some other function.
# TODO: varnam_config uses a varargs function.
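# A minimal sketch of how FUNCTION_LIST is meant to be consumed (the actual
# binding code lives elsewhere, e.g. in the package's utils module; the helper
# below is illustrative only): each entry is [name, argument ctypes, return
# ctype], and the prototypes are attached to the loaded libvarnam handle.
#
# def bind_functions(libvarnam):
#     for name, argtypes, restype in FUNCTION_LIST:
#         func = getattr(libvarnam, name)
#         func.argtypes = argtypes
#         func.restype = restype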
| mit | 1,290,528,990,029,869,300 | 37.906977 | 177 | 0.59205 | false | 3.036298 | false | false | false |
rgayon/plaso | plaso/output/shared_dsv.py | 1 | 4484 | # -*- coding: utf-8 -*-
"""Shared functionality for delimiter separated values output modules."""
from __future__ import unicode_literals
from plaso.output import formatting_helper
from plaso.output import interface
class DSVEventFormattingHelper(formatting_helper.EventFormattingHelper):
"""Delimiter separated values output module event formatting helper."""
def __init__(
self, output_mediator, field_formatting_helper, field_names,
field_delimiter=','):
"""Initializes a delimiter separated values event formatting helper.
Args:
output_mediator (OutputMediator): output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
field_names (list[str]): names of the fields to output.
field_delimiter (Optional[str]): field delimiter.
"""
super(DSVEventFormattingHelper, self).__init__(output_mediator)
self._field_delimiter = field_delimiter
self._field_names = field_names
self._field_formatting_helper = field_formatting_helper
def _SanitizeField(self, field):
"""Sanitizes a field for output.
This method replaces any field delimiters with a space.
Args:
field (str): value of the field to sanitize.
Returns:
str: sanitized value of the field.
"""
if self._field_delimiter and isinstance(field, str):
return field.replace(self._field_delimiter, ' ')
return field
def GetFormattedEvent(self, event, event_data, event_data_stream, event_tag):
"""Retrieves a string representation of the event.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
event_tag (EventTag): event tag.
Returns:
str: string representation of the event.
"""
field_values = []
for field_name in self._field_names:
field_value = self._field_formatting_helper.GetFormattedField(
field_name, event, event_data, event_data_stream, event_tag)
field_value = self._SanitizeField(field_value)
field_values.append(field_value)
return self._field_delimiter.join(field_values)
def GetFormattedFieldNames(self):
"""Retrieves a string representation of the field names.
Returns:
str: string representation of the field names.
"""
return self._field_delimiter.join(self._field_names)
def SetFieldDelimiter(self, field_delimiter):
"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""
self._field_delimiter = field_delimiter
def SetFields(self, field_names):
"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""
self._field_names = field_names
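# Illustrative behaviour of the sanitising above (field values are made up):
# with field_delimiter=',' a value such as 'error,disk full' is emitted as
# 'error disk full', so every output line still splits into exactly one column
# per configured field name.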
class DSVOutputModule(interface.LinearOutputModule):
"""Shared functionality for delimiter separated values output modules."""
def __init__(
self, output_mediator, field_formatting_helper, names, delimiter=',',
header=None):
"""Initializes a delimiter separated values output module.
Args:
output_mediator (OutputMediator): an output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
names (list[str]): names of the fields to output.
delimiter (Optional[str]): field delimiter.
header (Optional[str]): header, where None will have WriteHeader
generate a header from the field names.
"""
event_formatting_helper = DSVEventFormattingHelper(
output_mediator, field_formatting_helper, names,
field_delimiter=delimiter)
super(DSVOutputModule, self).__init__(
output_mediator, event_formatting_helper)
self._header = header
def SetFieldDelimiter(self, field_delimiter):
"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""
self._event_formatting_helper.SetFieldDelimiter(field_delimiter)
def SetFields(self, field_names):
"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""
self._event_formatting_helper.SetFields(field_names)
def WriteHeader(self):
"""Writes the header to the output."""
if self._header:
output_text = self._header
else:
output_text = self._event_formatting_helper.GetFormattedFieldNames()
output_text = '{0:s}\n'.format(output_text)
self._output_writer.Write(output_text)
| apache-2.0 | -6,534,213,863,833,684,000 | 31.492754 | 79 | 0.690455 | false | 4.171163 | false | false | false |
ysasaki6023/NeuralNetworkStudy | cifar04/net.py | 1 | 3132 | #!/usr/bin/env python
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.utils import conv
class ImageProcessNetwork(chainer.Chain):
def __init__(self,
I_colors, I_Xunit, I_Yunit, F_unit,
N_PLayers = 4,
P0C_feature = 32,
P1C_feature = 32,
P2C_feature = 16,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 2,
P2P_ksize = 2,
L1_dropout = 0.5,
L2_dropout = 0.0,
L2_unit = 500):
super(ImageProcessNetwork, self).__init__()
self.IsTrain = True
self.NPLayers = N_PLayers
self.NFeatures = [I_colors]
self.NFilter = [1]
self.NKsize = [1]
self.NImgPix = [(I_Xunit,I_Yunit)]
self.L1_dropout = L1_dropout
self.L2_dropout = L2_dropout
self.L2_unit = L2_unit
for iL in range(self.NPLayers):
## Set Variables
self.NFeatures.append(self.gradualVariable(iL,self.NPLayers,P0C_feature,P1C_feature,P2C_feature))
self.NFilter.append( self.gradualVariable(iL,self.NPLayers,P0C_filter ,P1C_filter ,P2C_filter ))
self.NKsize.append( self.gradualVariable(iL,self.NPLayers,P0P_ksize ,P1P_ksize ,P2P_ksize ))
## Update layers
self.NImgPix.append(
( conv.get_conv_outsize( self.NImgPix[-1][0], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True),
conv.get_conv_outsize( self.NImgPix[-1][1], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True)))
self.add_link("P%d"%iL,L.Convolution2D( self.NFeatures[-2], self.NFeatures[-1],
self.NFilter[-1] , pad=int(self.NFilter[-1]/2.)))
self.add_link("L1",L.Linear( self.NImgPix[-1][0] * self.NImgPix[-1][1] * self.NFeatures[-1] , L2_unit))
self.add_link("L2",L.Linear( L2_unit, F_unit))
return
def gradualVariable(self, cLayer, tLayer, val0, val1, val2):
pos = 0.5
        if cLayer <= int(pos*tLayer):
            v0, v1, p0, p1, pc = val0, val1, 0, int(pos*tLayer), int(cLayer - 0)
        else:
            v0, v1, p0, p1, pc = val1, val2, int(pos*tLayer), tLayer-1, int(cLayer - int(pos*tLayer))
return int(float(v0) + (float(v1)-float(v0))/(float(p1)-float(p0))*float(pc))
def setTrainMode(self, IsTrain):
self.IsTrain = IsTrain
return
def __call__(self, x):
h = x
for iL in range(self.NPLayers):
h = self.__dict__["P%d"%iL](h)
h = F.local_response_normalization(h)
h = F.max_pooling_2d(F.relu(h), ksize=self.NKsize[iL+1], cover_all=True)
h = F.dropout(F.relu(self.L1(h)),ratio=self.L1_dropout,train=self.IsTrain)
h = F.dropout(F.relu(self.L2(h)),ratio=self.L2_dropout,train=self.IsTrain)
y = h
return y
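# A minimal construction sketch (CIFAR-like shapes are assumed here for
# illustration only): 3-colour 32x32 inputs classified into 10 classes.
#
# model = ImageProcessNetwork(I_colors=3, I_Xunit=32, I_Yunit=32, F_unit=10)
# model.setTrainMode(False)
# # x: chainer.Variable wrapping a float32 array of shape (batch, 3, 32, 32);
# # y = model(x) then holds the (batch, 10) class scores.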
| mit | 2,948,105,231,894,065,000 | 42.5 | 128 | 0.529055 | false | 2.960302 | false | false | false |
caltech-chimera/pychimera | scripts/multiphot.py | 1 | 9783 | #!/usr/bin/env python
"""
--------------------------------------------------------------------------
Routine to perform aperture photometry on CHIMERA science frames.
    Usage: python multiphot.py [options] image coords
Authors:
Navtej Saini, Lee Rosenthal
Organization:
Caltech, Pasadena, CA, USA
Version:
7 January 2016 0.1 Initial implementation
9 February 2016 0.2 User input for photometric zero point
28 July 2017 0.3 Allow processing of multiple stars.
--------------------------------------------------------------------------
"""
import os, sys
import numpy as np, warnings
from StringIO import StringIO
from optparse import OptionParser
try:
import matplotlib.pylab as plt
except ImportError:
plot_flag = False
else:
try:
import seaborn
except ImportError:
pass
plot_flag = True
import chimera
def plotter(phot_data, nframes, exptime, outfile):
"""
Plot light curve.
Parameters
----------
phot_data : numpy array
Photometry array
nframes : int
Number of image cube frames
exptime : float
Kinetic or accumulation time
outfile : string
Name of the out png image
Returns
-------
None
"""
params = {'backend': 'ps',
'font.size': 10,
'axes.labelweight': 'medium',
'figure.dpi' : 300,
'savefig.dpi': 300,
'savefig.jpeg_quality': 100
}
plt.rcParams.update(params)
ts = np.linspace(0, nframes*exptime, nframes)
plt.figure(figsize=(6,4))
plt.title("Normalized Light Curve : %s" %phot_data[0]['DATETIME'].split('T')[0])
plt.xlabel("Time (secs)")
plt.ylabel("Normalized Flux")
plt.plot(ts, phot_data['FLUX_ADU']/np.mean(phot_data['FLUX_ADU']), "r-")
plt.savefig(outfile, dpi = 300, bbox_inches = "tight")
return
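# Illustrative call (values are placeholders): plot the light curve of a single
# star from 100 frames taken with a 0.1 s kinetic/accumulation time.
#
#   plotter(phot_data[0], nframes=100, exptime=0.1, outfile="target.lc.png")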
def process(infile, coords, method, inner_radius, outer_radius, cen_method, window_size, output, zmag):
"""
Entry point function to process science image.
Parameters
----------
infile : string
Science image or list of science images
coords : string
Input text file with coordinates of stars
    method : string
        Method used to determine the overlap of the aperture on the pixel grid
    inner_radius : float
        Inner sky annulus radius in pixels
    outer_radius : float
        Outer sky annulus radius in pixels
cen_method : string
Centroid method
window_size : int
Centroid finding window size in pixels
output : string
Output file name
zmag : float
Photometric zero point
Returns
-------
None
"""
print "FASTPHOT: CHIMERA Fast Aperture Photometry Routine"
inner_radius = float(inner_radius)
outer_radius = float(outer_radius)
# Check if input is a string of FITS images or a text file with file names
if infile[0] == "@":
infile = infile[1:]
if not os.path.exists(infile):
            print "FASTPHOT: Not able to locate file %s" %infile
image_cubes = []
with open(infile, "r") as fd:
for line in fd.readlines():
if len(line) > 1:
image_cubes.append(line.replace("\n", ""))
else:
image_cubes = infile.split(",")
# Number of images
ncubes = len(image_cubes)
pos = np.loadtxt(coords, ndmin = 2)
nstars = len(pos)
total_phot_data = []
for i in range(ncubes):
sci_file = image_cubes[i]
print " Processing science image %s" %sci_file
# Read FITS image and star coordinate
image = chimera.fitsread(sci_file)
# Instantiate an Aperphot object
ap = chimera.Aperphot(sci_file, coords)
# Set fwhmpsf, sigma, annulus, dannulus and zmag
ap.method = method
ap.inner_radius = inner_radius
ap.outer_radius = outer_radius
if zmag != "":
ap.zmag = float(zmag)
# Determine nominal aperture radius for photometry
if i == 0:
nom_aper = ap.cog(window_size, cen_method)
print " Nominal aperture radius : %4.1f pixels" %nom_aper
# Perform aperture photometry on all the frames
dtype = [("DATETIME", "S25"),("XCEN", "f4"),("YCEN", "f4"),("MSKY", "f8"),("NSKY", "f8"),("AREA", "f8"),("FLUX_ADU", "f8"),("FLUX_ELEC", "f8"),("FERR", "f8"),("MAG", "f8")]
phot_data = np.zeros([nstars, ap.nframes], dtype = dtype)
for j in range(ap.nframes):
print " Processing frame number : %d" %(j+1)
objpos = chimera.recenter(image[j,:,:], pos, window_size, cen_method)
aperphot_data = ap.phot(image[j,:,:], objpos, nom_aper)
pos = np.copy(objpos)
phot_data[:,j]['DATETIME'] = ap.addtime(j * ap.kintime).isoformat()
phot_data[:,j]['XCEN'] = aperphot_data["xcenter_raw"]
phot_data[:,j]['YCEN'] = aperphot_data["ycenter_raw"]
phot_data[:,j]['MSKY'] = aperphot_data["msky"]
phot_data[:,j]['NSKY'] = aperphot_data["nsky"]
phot_data[:,j]['AREA'] = aperphot_data["area"]
phot_data[:,j]['FLUX_ADU'] = aperphot_data["flux"]
phot_data[:,j]['FLUX_ELEC'] = phot_data[:,j]['FLUX_ADU'] * ap.epadu
phot_data[:,j]['MAG'] = ap.zmag - 2.5 * np.log10(phot_data[:,j]['FLUX_ELEC']/ap.exptime)
# Calculate error in flux - using the formula
# err = sqrt(flux * gain + npix * (1 + (npix/nsky)) * (flux_sky * gain + R**2))
            phot_data[:,j]['FERR'] = np.sqrt(phot_data[:,j]['FLUX_ELEC'] + phot_data[:,j]['AREA'] * (1 + phot_data[:,j]['AREA']/phot_data[:,j]['NSKY']) * (phot_data[:,j]['MSKY'] * ap.epadu + ap.readnoise**2))
total_phot_data.append(phot_data)
# Save photometry data in numpy binary format
print " Saving photometry data as numpy binary"
if output != "":
npy_outfile = output + ".npy"
else:
npy_outfile = sci_file.replace(".fits", ".phot.npy")
if os.path.exists(npy_outfile):
os.remove(npy_outfile)
#np.save(npy_outfile, phot_data)
# Plot first pass light curve
if plot_flag:
print " Plotting normalized light curve"
if output != "":
plt_outfile = output + ".png"
else:
plt_outfile = sci_file.replace(".fits", ".lc.png")
plotter(phot_data, ap.nframes, ap.kintime, plt_outfile)
# Convert the total_phot_data to array and reshape it
print ' Saving consolidated photometry data...'
total_phot_data_arr = np.concatenate(total_phot_data, axis=1)
# Save the array as npy file
if output != "":
np.save(output+"phot_total.npy", total_phot_data_arr)
else: np.save("phot_total.npy", total_phot_data_arr)
return
if __name__ == "__main__":
usage = "Usage: python %prog [options] sci_image coords"
description = "Description. Utility to perform fast aperture photometry in CHIMERA science images."
parser = OptionParser(usage = usage, version = "%prog 0.2", description = description)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default = False,
help = "print result messages to stdout"
)
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default = True,
help = "don't print result messages to stdout"
)
parser.add_option("-m", "--method", dest = "method",
action="store", metavar="METHOD", help = "Method to use for determining overlap between aperture and pixels (default is exact)",
default = "exact"
)
parser.add_option("-i", "--inner_radius", dest = "inner_radius",
                      action="store", metavar="INNER_RADIUS", help = "Inner radius of sky annulus in pixels (default is 14)",
default = 14
)
parser.add_option("-d", "--outer_radius", dest = "outer_radius",
                      action="store", metavar="OUTER_RADIUS", help = "Outer radius of sky annulus in pixels (default is 16)",
default = 16
)
parser.add_option("-c", "--cen_method", dest = "cen_method",
action="store", metavar="CEN_METHOD", help = "Centroid method (default is 2dg)",
default = "2dg"
)
parser.add_option("-w", "--window_size", dest = "window_size",
action="store", metavar="WINDOW_SIZE", help = "Window size for centroid (default is 35)",
default = 35
)
parser.add_option("-o", "--output", dest = "output",
action="store", metavar="OUTPUT", help = "Output file name",
default = ""
)
parser.add_option("-z", "--zmag", dest = "zmag",
                      action="store", metavar="ZMAG", help = "Photometric zero point",
default = ""
)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("FASTPHOT: Incorrect number of arguments")
# Check verbosity
if not options.verbose:
output = StringIO()
old_stdout = sys.stdout
sys.stdout = output
# Switch off warnings
warnings.filterwarnings('ignore')
process(args[0], args[1], options.method, options.inner_radius, options.outer_radius, options.cen_method, options.window_size, options.output, options.zmag)
# Reset verbosity
if not options.verbose:
sys.stdout = old_stdout
| mit | -4,409,292,525,545,918,500 | 32.618557 | 202 | 0.552898 | false | 3.705682 | false | false | false |
sunqm/pyscf | pyscf/scf/uhf_symm.py | 1 | 22155 | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic unrestricted Hartree-Fock with point group symmetry.
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import symm
from pyscf.lib import logger
from pyscf.scf import hf_symm
from pyscf.scf import uhf
from pyscf.scf import chkfile
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'scf_analyze_with_meta_lowdin', True)
MO_BASE = getattr(__config__, 'MO_BASE', 1)
def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
from pyscf.lo import orth
from pyscf.tools import dump_mat
mol = mf.mol
if not mol.symmetry:
return uhf.analyze(mf, verbose, with_meta_lowdin, **kwargs)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
mo_coeff = mf.mo_coeff
ovlp_ao = mf.get_ovlp()
log = logger.new_logger(mf, verbose)
if log.verbose >= logger.NOTE:
mf.dump_scf_summary(log)
nirrep = len(mol.irrep_id)
ovlp_ao = mf.get_ovlp()
orbsyma, orbsymb = mf.get_orbsym(mo_coeff, ovlp_ao)
orbsyma_in_d2h = numpy.asarray(orbsyma) % 10
orbsymb_in_d2h = numpy.asarray(orbsymb) % 10
tot_sym = 0
noccsa = [sum(orbsyma_in_d2h[mo_occ[0]>0]==ir) for ir in mol.irrep_id]
noccsb = [sum(orbsymb_in_d2h[mo_occ[1]>0]==ir) for ir in mol.irrep_id]
for i, ir in enumerate(mol.irrep_id):
if (noccsa[i]+noccsb[i]) % 2:
tot_sym ^= ir
if mol.groupname in ('Dooh', 'Coov', 'SO3'):
log.note('TODO: total wave-function symmetry for %s', mol.groupname)
else:
log.note('Wave-function symmetry = %s',
symm.irrep_id2name(mol.groupname, tot_sym))
log.note('alpha occupancy for each irrep: '+(' %4s'*nirrep),
*mol.irrep_name)
log.note(' '+(' %4d'*nirrep),
*noccsa)
log.note('beta occupancy for each irrep: '+(' %4s'*nirrep),
*mol.irrep_name)
log.note(' '+(' %4d'*nirrep),
*noccsb)
log.note('**** MO energy ****')
irname_full = {}
for k, ir in enumerate(mol.irrep_id):
irname_full[ir] = mol.irrep_name[k]
irorbcnt = {}
for k, j in enumerate(orbsyma):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('alpha MO #%d (%s #%d), energy= %.15g occ= %g',
k+MO_BASE, irname_full[j], irorbcnt[j],
mo_energy[0][k], mo_occ[0][k])
irorbcnt = {}
for k, j in enumerate(orbsymb):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('beta MO #%d (%s #%d), energy= %.15g occ= %g',
k+MO_BASE, irname_full[j], irorbcnt[j],
mo_energy[1][k], mo_occ[1][k])
if mf.verbose >= logger.DEBUG:
label = mol.ao_labels()
molabel = []
irorbcnt = {}
for k, j in enumerate(orbsyma):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
molabel.append('#%-d(%s #%d)' %
(k+MO_BASE, irname_full[j], irorbcnt[j]))
if with_meta_lowdin:
log.debug(' ** alpha MO coefficients (expansion on meta-Lowdin AOs) **')
orth_coeff = orth.orth_ao(mol, 'meta_lowdin', s=ovlp_ao)
c_inv = numpy.dot(orth_coeff.conj().T, ovlp_ao)
mo = c_inv.dot(mo_coeff[0])
else:
log.debug(' ** alpha MO coefficients (expansion on AOs) **')
mo = mo_coeff[0]
        dump_mat.dump_rec(mf.stdout, mo, label, molabel, start=MO_BASE, **kwargs)
molabel = []
irorbcnt = {}
for k, j in enumerate(orbsymb):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
molabel.append('#%-d(%s #%d)' %
(k+MO_BASE, irname_full[j], irorbcnt[j]))
if with_meta_lowdin:
log.debug(' ** beta MO coefficients (expansion on meta-Lowdin AOs) **')
mo = c_inv.dot(mo_coeff[1])
else:
log.debug(' ** beta MO coefficients (expansion on AOs) **')
mo = mo_coeff[1]
dump_mat.dump_rec(mol.stdout, mo, label, molabel, start=MO_BASE, **kwargs)
dm = mf.make_rdm1(mo_coeff, mo_occ)
if with_meta_lowdin:
pop_and_charge = mf.mulliken_meta(mol, dm, s=ovlp_ao, verbose=log)
else:
pop_and_charge = mf.mulliken_pop(mol, dm, s=ovlp_ao, verbose=log)
dip = mf.dip_moment(mol, dm, verbose=log)
return pop_and_charge, dip
def get_irrep_nelec(mol, mo_coeff, mo_occ, s=None):
'''Alpha/beta electron numbers for each irreducible representation.
Args:
mol : an instance of :class:`Mole`
To provide irrep_id, and spin-adapted basis
mo_occ : a list of 1D ndarray
Regular occupancy, without grouping for irreps
mo_coeff : a list of 2D ndarray
Regular orbital coefficients, without grouping for irreps
Returns:
irrep_nelec : dict
The number of alpha/beta electrons for each irrep {'ir_name':(int,int), ...}.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True, charge=1, spin=1, verbose=0)
>>> mf = scf.UHF(mol)
>>> mf.scf()
-75.623975516256721
>>> scf.uhf_symm.get_irrep_nelec(mol, mf.mo_coeff, mf.mo_occ)
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 1), 'B2': (1, 0)}
'''
if getattr(mo_coeff[0], 'orbsym', None) is not None:
orbsyma = mo_coeff[0].orbsym
else:
orbsyma = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[0], s, False)
if getattr(mo_coeff[1], 'orbsym', None) is not None:
orbsymb = mo_coeff[1].orbsym
else:
orbsymb = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[1], s, False)
orbsyma = numpy.array(orbsyma)
orbsymb = numpy.array(orbsymb)
irrep_nelec = dict([(mol.irrep_name[k], (int(sum(mo_occ[0][orbsyma==ir])),
int(sum(mo_occ[1][orbsymb==ir]))))
for k, ir in enumerate(mol.irrep_id)])
return irrep_nelec
def canonicalize(mf, mo_coeff, mo_occ, fock=None):
'''Canonicalization diagonalizes the UHF Fock matrix in occupied, virtual
    subspaces separately (without changing occupancy).
'''
mol = mf.mol
if not mol.symmetry:
return uhf.canonicalize(mf, mo_coeff, mo_occ, fock)
mo_occ = numpy.asarray(mo_occ)
assert(mo_occ.ndim == 2)
if fock is None:
dm = mf.make_rdm1(mo_coeff, mo_occ)
fock = mf.get_hcore() + mf.get_veff(mf.mol, dm)
occidxa = mo_occ[0] == 1
occidxb = mo_occ[1] == 1
viridxa = ~occidxa
viridxb = ~occidxb
mo = numpy.empty_like(mo_coeff)
mo_e = numpy.empty(mo_occ.shape)
s = mf.get_ovlp()
if (getattr(mo_coeff, 'orbsym', None) is not None or
(getattr(mo_coeff[0], 'orbsym', None) is not None and
getattr(mo_coeff[1], 'orbsym', None) is not None)):
orbsyma, orbsymb = mf.get_orbsym(mo_coeff, s)
def eig_(fock, mo_coeff, idx, es, cs):
if numpy.count_nonzero(idx) > 0:
orb = mo_coeff[:,idx]
f1 = reduce(numpy.dot, (orb.conj().T, fock, orb))
e, c = scipy.linalg.eigh(f1)
es[idx] = e
cs[:,idx] = numpy.dot(mo_coeff[:,idx], c)
for ir in set(orbsyma):
idx_ir = orbsyma == ir
eig_(fock[0], mo_coeff[0], idx_ir & occidxa, mo_e[0], mo[0])
eig_(fock[0], mo_coeff[0], idx_ir & viridxa, mo_e[0], mo[0])
for ir in set(orbsymb):
idx_ir = orbsymb == ir
eig_(fock[1], mo_coeff[1], idx_ir & occidxb, mo_e[1], mo[1])
eig_(fock[1], mo_coeff[1], idx_ir & viridxb, mo_e[1], mo[1])
else:
def eig_(fock, mo_coeff, idx, es, cs):
if numpy.count_nonzero(idx) > 0:
orb = mo_coeff[:,idx]
f1 = reduce(numpy.dot, (orb.conj().T, fock, orb))
e, c = scipy.linalg.eigh(f1)
es[idx] = e
c = numpy.dot(mo_coeff[:,idx], c)
cs[:,idx] = hf_symm._symmetrize_canonicalization_(mf, e, c, s)
eig_(fock[0], mo_coeff[0], occidxa, mo_e[0], mo[0])
eig_(fock[0], mo_coeff[0], viridxa, mo_e[0], mo[0])
eig_(fock[1], mo_coeff[1], occidxb, mo_e[1], mo[1])
eig_(fock[1], mo_coeff[1], viridxb, mo_e[1], mo[1])
orbsyma, orbsymb = mf.get_orbsym(mo, s)
mo = (lib.tag_array(mo[0], orbsym=orbsyma),
lib.tag_array(mo[1], orbsym=orbsymb))
return mo_e, mo
def get_orbsym(mol, mo_coeff, s=None, check=False):
if getattr(mo_coeff, 'orbsym', None) is not None:
orbsym = numpy.asarray(mo_coeff.orbsym)
else:
orbsym = (hf_symm.get_orbsym(mol, mo_coeff[0], s, check),
hf_symm.get_orbsym(mol, mo_coeff[1], s, check))
return orbsym
def get_wfnsym(mf, mo_coeff=None, mo_occ=None):
orbsyma, orbsymb = mf.get_orbsym(mo_coeff)
if mf.mol.groupname in ('SO3', 'Dooh', 'Coov'):
if numpy.any(orbsyma > 7):
logger.warn(mf, 'Wave-function symmetry for %s not supported. '
'Wfn symmetry is mapped to D2h/C2v group.',
mf.mol.groupname)
orbsyma = orbsyma % 10
orbsymb = orbsymb % 10
if mo_occ is None:
mo_occ = mf.mo_occ
wfnsym = 0
for ir in orbsyma[mo_occ[0] == 1]:
wfnsym ^= ir
for ir in orbsymb[mo_occ[1] == 1]:
wfnsym ^= ir
return wfnsym
class SymAdaptedUHF(uhf.UHF):
__doc__ = uhf.UHF.__doc__ + '''
Attributes for symmetry allowed UHF:
irrep_nelec : dict
Specify the number of alpha/beta electrons for particular irrep
{'ir_name':(int,int), ...}.
For the irreps not listed in these dicts, the program will choose the
occupancy based on the orbital energies.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True, charge=1, spin=1, verbose=0)
>>> mf = scf.RHF(mol)
>>> mf.scf()
-75.623975516256692
>>> mf.get_irrep_nelec()
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 1), 'B2': (1, 0)}
>>> mf.irrep_nelec = {'B1': (1, 0)}
>>> mf.scf()
-75.429189192031131
>>> mf.get_irrep_nelec()
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 0), 'B2': (1, 1)}
'''
def __init__(self, mol):
uhf.UHF.__init__(self, mol)
        # number of electrons for each irrep
self.irrep_nelec = {}
self._keys = self._keys.union(['irrep_nelec'])
def dump_flags(self, verbose=None):
uhf.UHF.dump_flags(self, verbose)
if self.irrep_nelec:
logger.info(self, 'irrep_nelec %s', self.irrep_nelec)
return self
def build(self, mol=None):
if mol is None: mol = self.mol
if mol.symmetry:
for irname in self.irrep_nelec:
if irname not in self.mol.irrep_name:
logger.warn(self, 'No irrep %s', irname)
hf_symm.check_irrep_nelec(mol, self.irrep_nelec, self.nelec)
return uhf.UHF.build(self, mol)
def eig(self, h, s):
mol = self.mol
if not mol.symmetry:
return self._eigh(h, s)
nirrep = mol.symm_orb.__len__()
s = symm.symmetrize_matrix(s, mol.symm_orb)
ha = symm.symmetrize_matrix(h[0], mol.symm_orb)
cs = []
es = []
orbsym = []
for ir in range(nirrep):
e, c = self._eigh(ha[ir], s[ir])
cs.append(c)
es.append(e)
orbsym.append([mol.irrep_id[ir]] * e.size)
ea = numpy.hstack(es)
ca = hf_symm.so2ao_mo_coeff(mol.symm_orb, cs)
ca = lib.tag_array(ca, orbsym=numpy.hstack(orbsym))
hb = symm.symmetrize_matrix(h[1], mol.symm_orb)
cs = []
es = []
orbsym = []
for ir in range(nirrep):
e, c = self._eigh(hb[ir], s[ir])
cs.append(c)
es.append(e)
orbsym.append([mol.irrep_id[ir]] * e.size)
eb = numpy.hstack(es)
cb = hf_symm.so2ao_mo_coeff(mol.symm_orb, cs)
cb = lib.tag_array(cb, orbsym=numpy.hstack(orbsym))
return (ea,eb), (ca,cb)
def get_grad(self, mo_coeff, mo_occ, fock=None):
g = uhf.UHF.get_grad(self, mo_coeff, mo_occ, fock)
if self.mol.symmetry:
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
orbsyma, orbsymb = self.get_orbsym(mo_coeff, self.get_ovlp())
sym_forbida = orbsyma[viridxa].reshape(-1,1) != orbsyma[occidxa]
sym_forbidb = orbsymb[viridxb].reshape(-1,1) != orbsymb[occidxb]
sym_forbid = numpy.hstack((sym_forbida.ravel(),
sym_forbidb.ravel()))
g[sym_forbid] = 0
return g
def get_occ(self, mo_energy=None, mo_coeff=None):
        ''' We assume mo_energy is grouped by symmetry irreps (see function
self.eig). The orbitals are sorted after SCF.
'''
if mo_energy is None: mo_energy = self.mo_energy
mol = self.mol
if not mol.symmetry:
return uhf.UHF.get_occ(self, mo_energy, mo_coeff)
orbsyma, orbsymb = self.get_orbsym(mo_coeff, self.get_ovlp())
mo_occ = numpy.zeros_like(mo_energy)
idx_ea_left = []
idx_eb_left = []
neleca_fix = nelecb_fix = 0
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idxa = numpy.where(orbsyma == ir)[0]
ir_idxb = numpy.where(orbsymb == ir)[0]
if irname in self.irrep_nelec:
if isinstance(self.irrep_nelec[irname], (int, numpy.integer)):
nelecb = self.irrep_nelec[irname] // 2
neleca = self.irrep_nelec[irname] - nelecb
else:
neleca, nelecb = self.irrep_nelec[irname]
ea_idx = numpy.argsort(mo_energy[0][ir_idxa].round(9), kind='mergesort')
eb_idx = numpy.argsort(mo_energy[1][ir_idxb].round(9), kind='mergesort')
mo_occ[0,ir_idxa[ea_idx[:neleca]]] = 1
mo_occ[1,ir_idxb[eb_idx[:nelecb]]] = 1
neleca_fix += neleca
nelecb_fix += nelecb
else:
idx_ea_left.append(ir_idxa)
idx_eb_left.append(ir_idxb)
nelec = self.nelec
neleca_float = nelec[0] - neleca_fix
nelecb_float = nelec[1] - nelecb_fix
assert(neleca_float >= 0)
assert(nelecb_float >= 0)
if len(idx_ea_left) > 0:
idx_ea_left = numpy.hstack(idx_ea_left)
ea_left = mo_energy[0][idx_ea_left]
ea_sort = numpy.argsort(ea_left.round(9), kind='mergesort')
occ_idx = idx_ea_left[ea_sort][:neleca_float]
mo_occ[0][occ_idx] = 1
if len(idx_eb_left) > 0:
idx_eb_left = numpy.hstack(idx_eb_left)
eb_left = mo_energy[1][idx_eb_left]
eb_sort = numpy.argsort(eb_left.round(9), kind='mergesort')
occ_idx = idx_eb_left[eb_sort][:nelecb_float]
mo_occ[1][occ_idx] = 1
vir_idx = (mo_occ[0]==0)
if self.verbose >= logger.INFO and numpy.count_nonzero(vir_idx) > 0:
noccsa = []
noccsb = []
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idxa = orbsyma == ir
ir_idxb = orbsymb == ir
noccsa.append(numpy.count_nonzero(mo_occ[0][ir_idxa]))
noccsb.append(numpy.count_nonzero(mo_occ[1][ir_idxb]))
ir_id2name = dict(zip(mol.irrep_id, mol.irrep_name))
ehomo = ehomoa = max(mo_energy[0][mo_occ[0]>0 ])
elumo = elumoa = min(mo_energy[0][mo_occ[0]==0])
irhomoa = ir_id2name[orbsyma[mo_energy[0] == ehomoa][0]]
irlumoa = ir_id2name[orbsyma[mo_energy[0] == elumoa][0]]
logger.info(self, 'alpha HOMO (%s) = %.15g LUMO (%s) = %.15g',
irhomoa, ehomoa, irlumoa, elumoa)
if nelecb_float > 0:
ehomob = max(mo_energy[1][mo_occ[1]>0 ])
elumob = min(mo_energy[1][mo_occ[1]==0])
irhomob = ir_id2name[orbsymb[mo_energy[1] == ehomob][0]]
irlumob = ir_id2name[orbsymb[mo_energy[1] == elumob][0]]
logger.info(self, 'beta HOMO (%s) = %.15g LUMO (%s) = %.15g',
irhomob, ehomob, irlumob, elumob)
ehomo = max(ehomoa,ehomob)
elumo = min(elumoa,elumob)
logger.debug(self, 'alpha irrep_nelec = %s', noccsa)
logger.debug(self, 'beta irrep_nelec = %s', noccsb)
hf_symm._dump_mo_energy(mol, mo_energy[0], mo_occ[0], ehomo, elumo,
orbsyma, 'alpha-', verbose=self.verbose)
hf_symm._dump_mo_energy(mol, mo_energy[1], mo_occ[1], ehomo, elumo,
orbsymb, 'beta-', verbose=self.verbose)
if mo_coeff is not None and self.verbose >= logger.DEBUG:
ovlp_ao = self.get_ovlp()
ss, s = self.spin_square((mo_coeff[0][:,mo_occ[0]>0],
mo_coeff[1][:,mo_occ[1]>0]), ovlp_ao)
logger.debug(self, 'multiplicity <S^2> = %.8g 2S+1 = %.8g', ss, s)
return mo_occ
def _finalize(self):
uhf.UHF._finalize(self)
ea = numpy.hstack(self.mo_energy[0])
eb = numpy.hstack(self.mo_energy[1])
# Using mergesort because it is stable. We don't want to change the
        # ordering of the symmetry labels when two orbitals are degenerate.
oa_sort = numpy.argsort(ea[self.mo_occ[0]>0 ].round(9), kind='mergesort')
va_sort = numpy.argsort(ea[self.mo_occ[0]==0].round(9), kind='mergesort')
ob_sort = numpy.argsort(eb[self.mo_occ[1]>0 ].round(9), kind='mergesort')
vb_sort = numpy.argsort(eb[self.mo_occ[1]==0].round(9), kind='mergesort')
idxa = numpy.arange(ea.size)
idxa = numpy.hstack((idxa[self.mo_occ[0]> 0][oa_sort],
idxa[self.mo_occ[0]==0][va_sort]))
idxb = numpy.arange(eb.size)
idxb = numpy.hstack((idxb[self.mo_occ[1]> 0][ob_sort],
idxb[self.mo_occ[1]==0][vb_sort]))
self.mo_energy = (ea[idxa], eb[idxb])
orbsyma, orbsymb = self.get_orbsym(self.mo_coeff, self.get_ovlp())
self.mo_coeff = (lib.tag_array(self.mo_coeff[0][:,idxa], orbsym=orbsyma[idxa]),
lib.tag_array(self.mo_coeff[1][:,idxb], orbsym=orbsymb[idxb]))
self.mo_occ = (self.mo_occ[0][idxa], self.mo_occ[1][idxb])
if self.chkfile:
chkfile.dump_scf(self.mol, self.chkfile, self.e_tot, self.mo_energy,
self.mo_coeff, self.mo_occ, overwrite_mol=False)
return self
@lib.with_doc(analyze.__doc__)
def analyze(self, verbose=None, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
if verbose is None: verbose = self.verbose
return analyze(self, verbose, with_meta_lowdin, **kwargs)
@lib.with_doc(get_irrep_nelec.__doc__)
def get_irrep_nelec(self, mol=None, mo_coeff=None, mo_occ=None, s=None):
if mol is None: mol = self.mol
if mo_occ is None: mo_occ = self.mo_occ
if mo_coeff is None: mo_coeff = self.mo_coeff
if s is None: s = self.get_ovlp()
return get_irrep_nelec(mol, mo_coeff, mo_occ, s)
def get_orbsym(self, mo_coeff=None, s=None):
if mo_coeff is None:
mo_coeff = self.mo_coeff
if s is None:
s = self.get_ovlp()
return get_orbsym(self.mol, mo_coeff, s)
orbsym = property(get_orbsym)
get_wfnsym = get_wfnsym
wfnsym = property(get_wfnsym)
canonicalize = canonicalize
UHF = SymAdaptedUHF
class HF1e(UHF):
def scf(self, *args):
logger.info(self, '\n')
logger.info(self, '******** 1 electron system ********')
self.converged = True
h1e = self.get_hcore(self.mol)
s1e = self.get_ovlp(self.mol)
self.mo_energy, self.mo_coeff = self.eig([h1e]*2, s1e)
self.mo_occ = self.get_occ(self.mo_energy, self.mo_coeff)
self.e_tot = self.mo_energy[0][self.mo_occ[0]>0][0] + self.mol.energy_nuc()
self._finalize()
return self.e_tot
del(WITH_META_LOWDIN)
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.build(
verbose = 1,
output = None,
atom = [['H', (0.,0.,0.)],
['H', (0.,0.,1.)], ],
basis = {'H': 'ccpvdz'},
symmetry = True,
charge = -1,
spin = 1
)
method = UHF(mol)
method.verbose = 5
method.irrep_nelec['A1u'] = (1,0)
energy = method.kernel()
print(energy)
method.analyze()
| apache-2.0 | 201,078,578,244,645,060 | 38.491979 | 113 | 0.534642 | false | 2.890034 | false | false | false |
daichi-yoshikawa/dnn | examples/mnist/nn_mnist.py | 1 | 3211 | # Authors: Daichi Yoshikawa <[email protected]>
# License: BSD 3 clause
import sys
sys.path.append('../..')
import json
import numpy as np
"""Configure logger before importing dnnet."""
import logging.config
with open('../common/logging.json') as f:
data = json.load(f)
logging.config.dictConfig(data)
import dnnet
from dnnet.config import Config
Config.enable_gpu()
from dnnet.neuralnet import NeuralNetwork
from dnnet.utils.nn_utils import scale_normalization
from dnnet.training.optimizer import SGD, Momentum, AdaGrad, Adam, AdaDelta, RMSProp, SMORMS3
from dnnet.training.weight_initialization import DefaultInitialization, He
from dnnet.training.loss_function import MultinomialCrossEntropy
from dnnet.training.loss_function import SquaredError
from dnnet.layers.affine import AffineLayer
from dnnet.layers.activation import Activation, ActivationLayer
from dnnet.layers.dropout import DropoutLayer
from dnnet.layers.batch_norm import BatchNormLayer
from data import get_mnist
data_dir = '../../data'
x, y = get_mnist(data_dir)
scale_normalization(x)
x = x.reshape(-1, 1, 28, 28)
dtype = np.float32
force_cpu = {
'activation': True,
'dropout': True,
'batch_norm': True
}
model = NeuralNetwork(input_shape=(1, 28, 28), dtype=dtype)
#model = NeuralNetwork(input_shape=784, dtype=dtype)
model.add(DropoutLayer(drop_ratio=0.2, force_cpu=force_cpu['dropout']))
model.add(AffineLayer(
output_shape=400, weight_initialization=He()))
model.add(BatchNormLayer(force_cpu=force_cpu['batch_norm']))
model.add(ActivationLayer(activation=Activation.Type.relu,
force_cpu=force_cpu['activation']))
model.add(DropoutLayer(drop_ratio=0.2, force_cpu=force_cpu['dropout']))
model.add(AffineLayer(
output_shape=400, weight_initialization=He()))
model.add(BatchNormLayer(force_cpu=force_cpu['batch_norm']))
model.add(ActivationLayer(activation=Activation.Type.relu,
force_cpu=force_cpu['activation']))
model.add(AffineLayer(
output_shape=10, weight_initialization=DefaultInitialization()))
model.add(BatchNormLayer(force_cpu=force_cpu['batch_norm']))
model.add(ActivationLayer(activation=Activation.Type.softmax,
force_cpu=force_cpu['activation']))
model.compile()
config_str = model.get_config_str()
print(config_str)
#optimizer = SGD(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
#optimizer = Momentum(learning_rate=3e-2, weight_decay=1e-3, momentum_rate=0.99, dtype=dtype)
optimizer = AdaGrad(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
#optimizer = Adam(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
#optimizer = AdaDelta(learning_rate=3e-2, weight_decay=1e-3, gamma=0.99, dtype=dtype)
#optimizer = RMSProp(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
lc = model.fit(
x=x, y=y, epochs=5, batch_size=100, optimizer=optimizer,
loss_function=MultinomialCrossEntropy(),
learning_curve=True, shuffle=True, shuffle_per_epoch=True,
test_data_ratio=0.142857, # Use 60,000 for training and 10,000 for test.
train_data_ratio_for_eval=0.01)
lc.plot(figsize=(8,10), fontsize=12)
model.show_filters(0, shape=(28, 28), layout=(10, 10), figsize=(12, 12))
| bsd-3-clause | -1,805,999,635,159,180,500 | 35.078652 | 93 | 0.737776 | false | 3.093449 | true | false | false |
yingcuhk/LeetCode | Algorithms/#321 Create Maximum Number/PythonCode.py | 1 | 4004 |
"""
Given two arrays of length m and n with digits 0-9 representing two numbers. Create the maximum number of length k <= m + n from digits of the two. The relative order of the digits from the same array must be preserved. Return an array of the k digits. You should try to optimize your time and space complexity.
Example 1:
nums1 = [3, 4, 6, 5]
nums2 = [9, 1, 2, 5, 8, 3]
k = 5
return [9, 8, 6, 5, 3]
Example 2:
nums1 = [6, 7]
nums2 = [6, 0, 4]
k = 5
return [6, 7, 6, 0, 4]
Example 3:
nums1 = [3, 9]
nums2 = [8, 9]
k = 3
return [9, 8, 9]
"""
class Solution(object):
def maxNumber(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
S = [(0,0)]
remain_k = k
pos = []
while remain_k > 0:
new_S = []
highdig = -1
for s in S:
canddig, state = self.highest_digit(nums1,nums2,s,remain_k)
if canddig > highdig:
highdig = canddig
new_S = state
if canddig == highdig:
new_S = list(set(new_S + state))
#print new_S
pos.append(highdig)
S = new_S
remain_k = remain_k-1
return pos
#return self.maxNum_recursive(nums1,nums2,0,0,k)
def highest_digit(self,nums1,nums2,state,remain_k):
beg1 = state[0]
beg2 = state[1]
N1 = len(nums1)
N2 = len(nums2)
if remain_k == 1:
return max(nums1[beg1:]+nums2[beg2:]), [(N1,N2)]
ind1,ind2 = beg1,beg2
highdig1 = -1
pos1 = -1
while N1-ind1+N2-beg2 >= remain_k and ind1 < N1:
if nums1[ind1] > highdig1:
highdig1 = nums1[ind1]
pos1 = ind1
ind1 += 1
highdig2 = -1
pos2 = -1
while N1-beg1+N2-ind2 >= remain_k and ind2 < N2:
if nums2[ind2] > highdig2:
highdig2 = nums2[ind2]
pos2 = ind2
ind2 +=1
if highdig1 > highdig2:
return highdig1, [(pos1+1,beg2)]
elif highdig2 > highdig1:
return highdig2, [(beg1, pos2+1)]
else:
return highdig1, [(pos1+1,beg2),(beg1, pos2+1)]
"""
# a recursive solution
def maxNum_recursive(self,nums1,nums2,beg1,beg2,k):
N1 = len(nums1)
N2 = len(nums2)
if k == 0:
return []
highdig1 = -1
pos1 = -1
ind1,ind2 = beg1,beg2
while N1-ind1+N2-beg2 >= k and ind1 < N1:
if nums1[ind1] > highdig1:
highdig1 = nums1[ind1]
pos1 = ind1
ind1 += 1
highdig2 = -1
pos2 = -1
while N1-beg1+N2-ind2 >= k and ind2 < N2:
if nums2[ind2] > highdig2:
highdig2 = nums2[ind2]
pos2 = ind2
ind2 +=1
if highdig1 > highdig2:
return [highdig1]+self.maxNum_recursive(nums1,nums2,pos1+1,beg2,k-1)
elif highdig2 > highdig1:
return [highdig2]+self.maxNum_recursive(nums1,nums2,beg1,pos2+1,k-1)
else:
if pos2 == N2-1:
return [highdig1]+self.maxNum_recursive(nums1,nums2,pos1+1,beg2,k-1)
if pos1 == N1-1:
return [highdig2]+self.maxNum_recursive(nums1,nums2,beg1,pos2+1,k-1)
pos1 = [highdig1]+self.maxNum_recursive(nums1,nums2,pos1+1,beg2,k-1)
pos2 = [highdig2]+self.maxNum_recursive(nums1,nums2,beg1,pos2+1,k-1)
return self.the_larger_one(pos1,pos2)
def the_larger_one(self,pos1,pos2):
for val1,val2 in zip(pos1,pos2):
if val1 > val2:
return pos1
if val2 > val1:
return pos2
return pos1
"""
| mit | -3,520,379,620,287,191,600 | 28.021739 | 311 | 0.488511 | false | 3.135474 | false | false | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/it/wmz.py | 1 | 4252 | import requests,re,time,xbmcaddon
import resolveurl as urlresolver
from ..common import clean_title, clean_search,send_log,error_log
from ..scraper import Scraper
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
class wmz(Scraper):
domains = ['http://www.watchmovieszone.com']
name = "WatchMoviesZone"
sources = []
def __init__(self):
self.base_link = 'http://www.watchmovieszone.com'
if dev_log=='true':
self.start_time = time.time()
def scrape_movie(self, title, year, imdb, debrid = False):
try:
search_id = clean_search(title.lower())
start_url = '%s/Movie/searchMovieName/?movie=%s' %(self.base_link,search_id)
headers={'User-Agent':User_Agent}
html = requests.get(start_url,headers=headers,timeout=5).content
match = re.compile('"ID":"(.+?)","movieName":"(.+?)"',re.DOTALL).findall(html)
for ID,item_name in match:
if 'dubbed' not in item_name.lower():
if clean_title(title).lower() in clean_title(item_name).lower():
if year in item_name:
item_name = item_name.replace(' ','_')
url = '%s/Movie/Index/%s/%s' %(self.base_link,ID,item_name)
#print 'wmz Movie pass '+url
#print 'wmz ID ' +ID
self.get_source(url,ID)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,'Check Search')
return self.sources
def get_source(self,url,ID):
try:
# url not needed
new_url = '%s/Movie/getmyLinks/?movID=%s' %(self.base_link,ID)
#print '###### '+new_url
headers={'User-Agent':User_Agent}
OPEN = requests.get(new_url,headers=headers,timeout=5).content
#print OPEN
Regex = re.compile('"picLink":"(.+?)"',re.DOTALL).findall(OPEN)
count = 0
for link in Regex:
#print link
if 'streamango.com' in link:
try:
get_res=requests.get(link,timeout=5).content
qual = re.compile('{type:"video/mp4".+?height:(.+?),',re.DOTALL).findall(get_res)[0]
if '1080' in qual:
rez='1080p'
elif '720' in qual:
rez = '720p'
else:rez= 'DVD'
except:rez='DVD'
count +=1
self.sources.append({'source': 'Streamango', 'quality': rez, 'scraper': self.name, 'url': link,'direct': False})
if 'openload' in link:
try:
chk = requests.get(link).content
rez = re.compile('"description" content="(.+?)"',re.DOTALL).findall(chk)[0]
if '1080' in rez:
res='1080p'
elif '720' in rez:
res='720p'
else:res='DVD'
except: res = 'DVD'
count +=1
self.sources.append({'source': 'Openload', 'quality': res, 'scraper': self.name, 'url': link,'direct': False})
else:
if urlresolver.HostedMediaFile(link).valid_url():
host = link.split('//')[1].replace('www.','')
host = host.split('/')[0].split('.')[0].title()
count +=1
self.sources.append({'source': host, 'quality': 'DVD', 'scraper': self.name, 'url': link,'direct': False})
if dev_log=='true':
end_time = time.time() - self.start_time
send_log(self.name,end_time,count)
except:
pass
| gpl-2.0 | 652,577,545,154,177,800 | 46.775281 | 144 | 0.46731 | false | 4.128155 | false | false | false |
jokuf/hack-blog | users/migrations/0002_auto_20170322_2028.py | 1 | 2798 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-22 20:28
from __future__ import unicode_literals
from django.db import migrations, models
import users.managers
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('users', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='author',
options={'verbose_name': 'user', 'verbose_name_plural': 'users'},
),
migrations.AlterModelManagers(
name='author',
managers=[
('objects', users.managers.UserManager()),
],
),
migrations.AddField(
model_name='author',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to='avatars/'),
),
migrations.AddField(
model_name='author',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='author',
name='is_active',
field=models.BooleanField(default=True, verbose_name='active'),
),
migrations.AddField(
model_name='author',
name='is_superuser',
field=models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
),
migrations.AddField(
model_name='author',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
migrations.AlterField(
model_name='author',
name='date_joined',
field=models.DateTimeField(auto_now_add=True, verbose_name='date joined'),
),
migrations.AlterField(
model_name='author',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='author',
name='first_name',
field=models.CharField(blank=True, max_length=30, verbose_name='first name'),
),
migrations.AlterField(
model_name='author',
name='last_name',
field=models.CharField(blank=True, max_length=30, verbose_name='last name'),
),
]
| mit | -6,814,778,244,652,638,000 | 37.861111 | 256 | 0.585061 | false | 4.448331 | false | false | false |
asteca/ASteCA | packages/best_fit/DEPRECATED/abcpmc_algor_DEPRECATED.py | 1 | 9822 |
import numpy as np
from scipy.optimize import differential_evolution as DE
import time as t
from .abcpmc import sampler, threshold
from ..synth_clust import synth_cluster
from . import likelihood
from .emcee_algor import varPars, closeSol, discreteParams, convergenceVals
def main(
lkl_method, e_max, err_lst, completeness, max_mag_syn,
fundam_params, obs_clust, theor_tracks, R_V, ext_coefs, st_dist_mass,
N_fc, cmpl_rnd, err_rnd, nwalkers_abc, nsteps_abc, nburn_abc,
priors_abc):
varIdxs, ndim, ranges = varPars(fundam_params)
def dist(synth_clust, obs_clust):
lkl = np.inf
if synth_clust:
lkl = likelihood.main(lkl_method, synth_clust, obs_clust)
return lkl
def postfn(model):
# Re-scale z and M
model_scale = [
model[0] / 100., model[1], model[2], model[3] * 10.,
model[4] * 1000., model[5]]
check_ranges = [
r[0] <= p <= r[1] for p, r in zip(*[model_scale, ranges[varIdxs]])]
synth_clust = []
# If some parameter is outside of the given ranges, don't bother
# obtaining the proper model.
if all(check_ranges):
model_proper = closeSol(fundam_params, varIdxs, model_scale)
# Metallicity and age indexes to identify isochrone.
m_i = fundam_params[0].index(model_proper[0])
a_i = fundam_params[1].index(model_proper[1])
isochrone = theor_tracks[m_i][a_i]
# Generate synthetic cluster.
synth_clust = synth_cluster.main(
e_max, err_lst, completeness, max_mag_syn, st_dist_mass,
isochrone, R_V, ext_coefs, N_fc, cmpl_rnd, err_rnd,
model_proper)
return synth_clust
# TODO add these parameters to the input params file
alpha, init_eps = 95, None
N_conv, tol_conv = 50., 0.01
max_secs = 22. * 60. * 60.
# Break out when AF is low.
# af_low, af_min_steps = 0.001, .1
max_t_walker = 30.
# eps_stuck_perc, N_eps_stuck_max = .005, 100
# Start timing.
elapsed = 0.
available_secs = max(30, max_secs)
start_t = t.time()
abcsampler = sampler.Sampler(
N=nwalkers_abc, Y=obs_clust, postfn=postfn, dist=dist)
# Set proposal
# sampler.particle_proposal_cls = sampler.OLCMParticleProposal
if init_eps is None:
# Estimate initial threshold value using DE.
def lnprob(model):
synth_clust = postfn(model)
return dist(synth_clust, obs_clust)
# Scale parameters bounds.
bounds = [
ranges[0] * 100., ranges[1], ranges[2], ranges[3] / 10.,
ranges[4] / 1000., ranges[5]]
result = DE(lnprob, bounds, maxiter=20)
init_eps = 4. * result.fun
print(" Initial threshold value: {:.2f}".format(init_eps))
# old_eps = init_eps
# TODO pass type of threshold from params file
# eps = threshold.LinearEps(T, 5000, init_eps)
eps = threshold.ConstEps(nsteps_abc, init_eps)
# Stddev values as full range.
std = np.eye(ndim) * (ranges.max(axis=1) - ranges.min(axis=1))
# Means as middle points in ranges.
means = (ranges.max(axis=1) + ranges.min(axis=1)) / 2.
# Scale values.
std[0], means[0] = std[0] * 100, means[0] * 100
std[3], means[3] = std[3] / 10, means[3] / 10
std[4], means[4] = std[4] / 1000., means[4] / 1000.
# Gaussian prior.
print(means)
print(std)
prior = sampler.GaussianPrior(mu=means, sigma=std)
# # We'll track how the average autocorrelation time estimate changes
# tau_index, autocorr_vals = 0, np.empty(nsteps_abc)
# # This will be useful to testing convergence
# old_tau = np.inf
# Check for convergence every 2% of steps or 100, whichever value
# is lower.
# N_steps_conv = min(int(nsteps_abc * 0.02), 100)
map_sol_old, N_models, prob_mean = [[], np.inf], 0, []
# N_eps_stuck = 0
chains_nruns, maf_steps, map_lkl = [], [], []
milestones = list(range(5, 101, 5))
for pool in abcsampler.sample(prior, eps):
print(
pool.t, pool.eps, pool.ratio, np.min(pool.dists),
np.mean(pool.dists))
chains_nruns.append(pool.thetas)
maf = pool.ratio
maf_steps.append([pool.t, maf])
N_models += nwalkers_abc / maf
# reduce eps value
# old_eps = eps.eps
eps.eps = np.percentile(pool.dists, alpha)
# # Check if threshold is stuck.
# if abs(eps.eps - old_eps) < eps_stuck_perc * eps.eps:
# N_eps_stuck += 1
# else:
# N_eps_stuck = 0
# if N_eps_stuck > N_eps_stuck_max:
# print(" Threshold is stuck (runs={}).".format(pool.t + 1))
# break
# if maf < af_low and pool.t > int(af_min_steps * nsteps_abc):
# print(" AF<{} (runs={})".format(af_low, pool.t + 1))
# break
if t.time() - start_t > (max_t_walker * nwalkers_abc):
print(" Sampler is stuck (runs={})".format(pool.t + 1))
break
elapsed += t.time() - start_t
if elapsed >= available_secs:
print(" Time consumed (runs={})".format(pool.t + 1))
break
start_t = t.time()
# # Only check convergence every 'N_steps_conv' steps
# if (pool.t + 1) % N_steps_conv:
# continue
# # Compute the autocorrelation time so far. Using tol=0 means that
# # we'll always get an estimate even if it isn't trustworthy.
# try:
# tau = autocorr.integrated_time(np.array(chains_nruns), tol=0)
# autocorr_vals[tau_index] = np.nanmean(tau)
# tau_index += 1
# # Check convergence
# converged = np.all(tau * N_conv < (pool.t + 1))
# converged &= np.all(np.abs(old_tau - tau) / tau < tol_conv)
# if converged:
# print(" Convergence achieved (runs={}).".format(pool.t + 1))
# break
# old_tau = tau
# except FloatingPointError:
# pass
# Store MAP solution in this iteration.
prob_mean.append([pool.t, np.mean(pool.dists)])
idx_best = np.argmin(pool.dists)
# Update if a new optimal solution was found.
if pool.dists[idx_best] < map_sol_old[1]:
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [pars[0] / 100., pars[1], pars[2], pars[3] * 10.,
pars[4] * 1000., pars[5]]
map_sol_old = [
closeSol(fundam_params, varIdxs, pars),
pool.dists[idx_best]]
map_lkl.append([pool.t, map_sol_old[1]])
# Print progress.
percentage_complete = (100. * (pool.t + 1) / nsteps_abc)
if len(milestones) > 0 and percentage_complete >= milestones[0]:
map_sol, logprob = map_sol_old
print("{:>3}% ({:.3f}) LP={:.1f} ({:g}, {:g}, {:.3f}, {:.2f}"
", {:g}, {:.2f})".format(
milestones[0], maf, logprob, *map_sol) +
" [{:.0f} m/s]".format(N_models / elapsed))
milestones = milestones[1:]
runs = pool.t + 1
# Evolution of the mean autocorrelation time.
tau_autocorr = np.array([np.nan] * 10) # autocorr_vals[:tau_index]
tau_index = np.nan
N_steps_conv = runs
# Final MAP fit.
idx_best = np.argmin(pool.dists)
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [
pars[0] / 100., pars[1], pars[2], pars[3] * 10., pars[4] * 1000.,
pars[5]]
map_sol = closeSol(fundam_params, varIdxs, pars)
map_lkl_final = pool.dists[idx_best]
abcsampler.close()
# Shape: (runs, nwalkers, ndim)
chains_nruns = np.array(chains_nruns)
# De-scale parameters.
chains_nruns[:, :, 0] = chains_nruns[:, :, 0] / 100.
chains_nruns[:, :, 3] = chains_nruns[:, :, 3] * 10.
chains_nruns[:, :, 4] = chains_nruns[:, :, 4] * 1000.
# Burn-in range.
Nb = int(runs * nburn_abc)
# Burn-in. Shape: (ndim, nwalkers, runs)
pars_chains_bi = discreteParams(
fundam_params, varIdxs, chains_nruns[:Nb, :, :]).T
# Change values for the discrete parameters with the closest valid values.
chains_nruns = discreteParams(
fundam_params, varIdxs, chains_nruns[Nb:, :, :])
mcmc_trace = chains_nruns.reshape(-1, ndim).T
# import matplotlib.pyplot as plt
# import corner
# corner.corner(
# mcmc_trace.T, quantiles=[0.16, 0.5, 0.84], show_titles=True)
# # levels=(1 - np.exp(-0.5),))
# plt.savefig("corner.png", dpi=300)
# Convergence parameters.
acorr_t, max_at_c, min_at_c, geweke_z, emcee_acorf, mcmc_ess, minESS,\
mESS, mESS_epsilon = convergenceVals(
'abc', ndim, varIdxs, N_conv, chains_nruns, mcmc_trace)
# Store mean solution.
mean_sol = closeSol(fundam_params, varIdxs, np.mean(mcmc_trace, axis=1))
isoch_fit_params = {
'varIdxs': varIdxs, 'nsteps_abc': runs, 'mean_sol': mean_sol,
'nburn_abc': Nb, 'map_sol': map_sol, 'map_lkl': map_lkl,
'map_lkl_final': map_lkl_final, 'prob_mean': prob_mean,
'mcmc_elapsed': elapsed, 'mcmc_trace': mcmc_trace,
'pars_chains_bi': pars_chains_bi, 'pars_chains': chains_nruns.T,
'maf_steps': maf_steps, 'autocorr_time': acorr_t,
'max_at_c': max_at_c, 'min_at_c': min_at_c,
'minESS': minESS, 'mESS': mESS, 'mESS_epsilon': mESS_epsilon,
'emcee_acorf': emcee_acorf, 'geweke_z': geweke_z,
'mcmc_ess': mcmc_ess,
'N_steps_conv': N_steps_conv, 'N_conv': N_conv, 'tol_conv': tol_conv,
'tau_index': tau_index, 'tau_autocorr': tau_autocorr
}
return isoch_fit_params
| gpl-3.0 | -8,437,757,124,377,861,000 | 35.786517 | 79 | 0.563225 | false | 3.073217 | false | false | false |
konker/switchd | util/pidfile.py | 1 | 1188 | import fcntl
import os
# FROM: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
class PidFile(object):
"""Context manager that locks a pid file. Implemented as class
not generator because daemon.py is calling .__exit__() with no parameters
instead of the None, None, None specified by PEP-343."""
# pylint: disable=R0903
def __init__(self, path):
self.path = path
self.pidfile = None
def __enter__(self):
self.pidfile = open(self.path, "a+")
try:
fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
raise SystemExit("Already running according to " + self.path)
self.pidfile.seek(0)
self.pidfile.truncate()
self.pidfile.write(str(os.getpid()))
self.pidfile.flush()
self.pidfile.seek(0)
return self.pidfile
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
try:
self.pidfile.close()
except IOError as err:
# ok if file was just closed elsewhere
if err.errno != 9:
raise
os.remove(self.path)
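# Illustrative usage sketch (not part of the original module); the pid file
# path below is an assumption for demonstration only.
if __name__ == "__main__":
    with PidFile("/tmp/switchd.pid"):
        pass  # only one instance reaches this point; a second exits via SystemExit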
| mit | 6,279,858,035,278,760,000 | 32 | 89 | 0.601852 | false | 3.869707 | false | false | false |
BK-TN/Islander | systems.py | 1 | 4654 | import components, actions
import pygame
import math
from collections import defaultdict
from point import Point
class DrawingSystem:
def __init__(self, screen, camera_target):
self.screen = screen
self.camera_pos = Point(0,0,0)
self.camera_target = camera_target
self.tileset = pygame.image.load("tileset.png") #12x16
self.tileset.set_colorkey((0,0,0))
self.tilew = 12
self.tileh = 16
self.entities = []
def check_entity(self, entity):
pass
def process(self, world):
def draw(drawable, draw_background):
# Find the tile to use based on the ASCII value of the char to draw
src_x = ord(drawable.char) % 16
src_y = math.floor(ord(drawable.char) / 16)
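            # e.g. char 'A' (ord 65): src_x = 65 % 16 = 1, src_y = 65 // 16 = 4,
            # i.e. column 1, row 4 of the 16-column tileset image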
# Create the rect this tile should be drawn in
rect = pygame.Rect(
(screentiles_x / 2 - self.camera_pos.x + x) * self.tilew,
(screentiles_y / 2 - self.camera_pos.y + y) * self.tileh,
self.tilew,
self.tileh)
# Set the tile color by changing the tileset's palette (Which is really fast)
self.tileset.set_palette_at(1,drawable.color)
if draw_background:
pygame.draw.rect(self.screen, drawable.bgcolor, rect)
# Draw tile
self.screen.blit(
self.tileset,
(rect.x,rect.y),
pygame.Rect(src_x * self.tilew, src_y * self.tileh, self.tilew, self.tileh)
)
if self.camera_target != None:
pos = world.find_pos(self.camera_target)
self.camera_pos = pos
self.screen.fill((0,0,0))
# Find the max amount of tiles that fit the with and height of the screen
# So we can calculate the center of it
screentiles_x = self.screen.get_width() / self.tilew
screentiles_y = self.screen.get_height() / self.tileh
# Calculate 'borders' to draw within
left = math.floor(self.camera_pos.x - screentiles_x/2)
right = math.floor(self.camera_pos.x + screentiles_x/2)
top = math.floor(self.camera_pos.y - screentiles_y/2)
bottom = math.floor(self.camera_pos.y + screentiles_y/2)
for x in range(left,right):
for y in range(top,bottom):
#gridslice = sorted(world.search_slice(x,y),key=lambda e: world.find_pos(e).z)
drawn = False
for z in range(self.camera_pos.z,10):
if drawn: break
entities_on_pos = world.check_spot(Point(x,y,z))
drawables = [d for d in (e.get(components.Drawable) for e in entities_on_pos) if d != None]
if len(drawables) > 0:
drawables = sorted(drawables, key=lambda d: d.depth)
draw(drawables[0], z == self.camera_pos.z)
drawn = True
pygame.display.flip()
class MovementSystem:
def __init__(self):
self.entities = []
def check_entity(self, entity):
if entity.has(components.Character) or entity.has(components.MoveRight):
self.entities.append(entity)
def process(self, world):
def try_move(world, entity, pos):
can_move = True
physical_comp = entity.get(components.Physical)
if physical_comp != None:
space_left = world.get_spot_space(pos)
if space_left < physical_comp.volume:
can_move = False
if can_move:
world.move_entity(entity, pos)
for e in self.entities:
character = e.get(components.Character)
moveright = e.get(components.MoveRight)
if character != None:
movement = e.get_actions(actions.MoveAction)
for mov in movement:
try_move(world, e, Point(mov.xtarget, mov.ytarget, 0)) #TODO: add a.ztarget
if moveright != None:
pos = world.find_pos(e)
try_move(world, e, Point(pos.x + 1, pos.y, pos.z))
class PhysicsSystem:
def __init__(self):
self.entities = []
def check_entity(self, entity):
if entity.has(components.Physical):
self.entities.append(entity)
def process(self, world):
for e in self.entities:
phys = e.get(components.Physical)
pos = world.find_pos(e)
pos_below = Point(pos.x,pos.y,pos.z+1)
space_below = world.get_spot_space(pos_below)
if space_below < phys.volume:
world.move_entity(e,pos_below)
| mit | 6,984,329,801,355,357,000 | 38.777778 | 111 | 0.561023 | false | 3.708367 | false | false | false |
sadimanna/computer_vision | clustering/kmeansppclustering_with_gap_statistic.py | 1 | 2599 | #K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters
import sys
import numpy as np
import scipy.io as sio
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.svm import SVC
filename = sys.argv[1]
datafile = sio.loadmat(filename)
data = datafile['bow']
sizedata=[len(data), len(data[0])]
disp = []
optimal_ks = []
#Determining the optimal number of clusters k with the gap statistic method
def gap_statistic(data):
sizedata = [len(data),len(data[0])]
SD = []
gap = []
for knum in xrange(1,20):
#I assumed that the number of clusters in my data won't be more than 20, this can be changed accordingly
print knum
#Clustering original Data
kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1)
kmeanspp.fit(data)
dispersion = kmeanspp.inertia_
#Clustering Reference Data
nrefs = 10
refDisp = np.zeros(nrefs)
for nref in xrange(nrefs):
refdata = np.random.random_sample(tuple(sizedata))
refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1)
refkmeans.fit(refdata)
refdisp = refkmeans.inertia_
refDisp[nref]=np.log(refdisp)
mean_log_refdisp = np.mean(refDisp)
gap.append(mean_log_refdisp-np.log(dispersion))
sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5
SD.append(sd)
	SD = [sd*((1+(1.0/nrefs))**0.5) for sd in SD]
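	# Gap statistic criterion (Tibshirani et al. 2001): pick the smallest k with
	# Gap(k) >= Gap(k+1) - s_{k+1}; the loop below checks s_{k+1} - (Gap(k+1) - Gap(k)) > 0.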
opt_k = None
diff = []
for i in xrange(len(gap)-1):
diff = (SD[i+1]-(gap[i+1]-gap[i]))
if diff>0:
opt_k = i+10
break
if opt_k < 20:
#print opt_k
return opt_k
else:
return 20
#Returning 20 if opt_k is more than 20 in my case, as I wanted not to search more than 20.
# Not required if range is larger.
ntrials = 50
for ntrial in xrange(ntrials):
print 'ntrial: ',ntrial
optimal_ks.append(gap_statistic(data))
#For plotting the gap statistic measure
#plt.plot(np.linspace(10,19,10,True),gap)
#plt.show()
unique_opt_k = list(set(optimal_ks))
k_count = {}
count_opt_k = 0
second_opt_k = 0
opt_k = 0
for u_o_k in unique_opt_k:
count = optimal_ks.count(u_o_k)
k_count[u_o_k]=count
if count>count_opt_k:
count_opt_k = count
opt_k = u_o_k
elif count==count_opt_k:
second_opt_k = u_o_k
print opt_k
print k_count
#Clusterin with optimal number of k
kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1)
kmeanspp.fit(data)
centers = kmeanspp.cluster_centers_
clusterlabels = kmeanspp.labels_
print clusterlabels
mdict = {}
mdict['clusterlabels'] = clusterlabels
sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column')
print 'dan dana dan done...'
| gpl-3.0 | 3,089,997,945,052,346,400 | 28.202247 | 106 | 0.696037 | false | 2.591226 | false | false | false |
dannykopping/mysql-utilities | mysql/utilities/common/tools.py | 1 | 12360 | #
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module contains methods for working with mysql server tools.
"""
import os
import sys
import shutil
import time
import subprocess
import inspect
from mysql.utilities import PYTHON_MIN_VERSION, PYTHON_MAX_VERSION
from mysql.utilities.common.format import print_list
from mysql.utilities.exception import UtilError
def _add_basedir(search_paths, path_str):
"""Add a basedir and all known sub directories
This method builds a list of possible paths for a basedir for locating
special MySQL files like mysqld (mysqld.exe), etc.
search_paths[inout] List of paths to append
path_str[in] The basedir path to append
"""
search_paths.append(path_str)
search_paths.append(os.path.join(path_str, "sql")) # for source trees
search_paths.append(os.path.join(path_str, "client")) # for source trees
search_paths.append(os.path.join(path_str, "share"))
search_paths.append(os.path.join(path_str, "scripts"))
search_paths.append(os.path.join(path_str, "bin"))
search_paths.append(os.path.join(path_str, "libexec"))
search_paths.append(os.path.join(path_str, "mysql"))
def get_tool_path(basedir, tool, fix_ext=True, required=True,
defaults_paths=[], search_PATH=False):
"""Search for a MySQL tool and return the full path
basedir[in] The initial basedir to search (from mysql server)
tool[in] The name of the tool to find
fix_ext[in] If True (default is True), add .exe if running on
Windows.
    required[in]       If True (default is True), an error will be
generated and the utility aborted if the tool is
not found.
defaults_paths[in] Default list of paths to search for the tool.
By default an empty list is assumed, i.e. [].
search_PATH[in] Boolean value that indicates if the paths specified by
the PATH environment variable will be used to search
for the tool. By default the PATH will not be searched,
i.e. search_PATH=False.
Returns (string) full path to tool
"""
search_paths = []
if basedir:
# Add specified basedir path to search paths
_add_basedir(search_paths, basedir)
if defaults_paths and len(defaults_paths):
# Add specified default paths to search paths
for path in defaults_paths:
search_paths.append(path)
else:
# Add default basedir paths to search paths
_add_basedir(search_paths, "/usr/local/mysql/")
_add_basedir(search_paths, "/usr/sbin/")
_add_basedir(search_paths, "/usr/share/")
# Search in path from the PATH environment variable
if search_PATH:
for path in os.environ['PATH'].split(os.pathsep):
search_paths.append(path)
if os.name == "nt" and fix_ext:
tool = tool + ".exe"
# Search for the tool
for path in search_paths:
norm_path = os.path.normpath(path)
if os.path.isdir(norm_path):
toolpath = os.path.join(norm_path, tool)
if os.path.isfile(toolpath):
return toolpath
else:
if tool == "mysqld.exe":
toolpath = os.path.join(norm_path, "mysqld-nt.exe")
if os.path.isfile(toolpath):
return toolpath
if required:
raise UtilError("Cannot find location of %s." % tool)
return None
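def _example_get_tool_path():
    """Illustrative sketch only (not part of the original module): locate the
    mysqld binary under an assumed default basedir, falling back to the PATH."""
    return get_tool_path("/usr/local/mysql/", "mysqld", required=False,
                         search_PATH=True)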
def delete_directory(dir):
"""Remove a directory (folder) and its contents.
dir[in] target directory
"""
import time
if os.path.exists(dir):
# It can take up to 10 seconds for Windows to 'release' a directory
# once a process has terminated. We wait...
if os.name == "nt":
stop = 10
i = 1
while i < stop and os.path.exists(dir):
shutil.rmtree(dir, True)
time.sleep(1)
i += 1
else:
shutil.rmtree(dir, True)
def execute_script(run_cmd, file=None, options=[], verbosity=False):
"""Execute a script.
This method spawns a subprocess to execute a script. If a file is
specified, it will direct output to that file else it will suppress
all output from the script.
run_cmd[in] command/script to execute
file[in] file path name to file, os.stdout, etc.
Default is None (do not log/write output)
options[in] arguments for script
Default is no arguments ([])
verbosity[in] show result of script
Default is False
Returns int - result from process execution
"""
if verbosity:
f_out = sys.stdout
else:
if not file:
file = os.devnull
f_out = open(file, 'w')
str_opts = [str(opt) for opt in options]
cmd_opts = " ".join(str_opts)
command = " ".join([run_cmd, cmd_opts])
if verbosity:
print "# SCRIPT EXECUTED:", command
proc = subprocess.Popen(command, shell=True,
stdout=f_out, stderr=f_out)
ret_val = proc.wait()
if not verbosity:
f_out.close()
return ret_val
def ping_host(host, timeout):
"""Execute 'ping' against host to see if it is alive.
host[in] hostname or IP to ping
timeout[in] timeout in seconds to wait
returns bool - True = host is reachable via ping
"""
if sys.platform == "darwin":
run_cmd = "ping -o -t %s %s" % (timeout, host)
elif os.name == "posix":
run_cmd = "ping -w %s %s" % (timeout, host)
else: # must be windows
run_cmd = "ping -n %s %s" % (timeout, host)
ret_val = execute_script(run_cmd)
return (ret_val == 0)
def get_mysqld_version(mysqld_path):
"""Return the version number for a mysqld executable.
mysqld_path[in] location of the mysqld executable
Returns tuple - (major, minor, release), or None if error
"""
import subprocess
args = [
" --version",
]
out = open("version_check", 'w')
proc = subprocess.Popen("%s --version" % mysqld_path,
stdout=out, stderr=out, shell=True)
proc.wait()
out.close()
out = open("version_check", 'r')
line = None
for line in out.readlines():
if "Ver" in line:
break
out.close()
try:
os.unlink('version_check')
except:
pass
if line is None:
return None
version = line.split(' ', 5)[3]
try:
maj, min, dev = version.split(".")
rel = dev.split("-")
return (maj, min, rel[0])
except:
return None
return None
def show_file_statistics(file_name, wild=False, out_format="GRID"):
"""Show file statistics for file name specified
file_name[in] target file name and path
wild[in] if True, get file statistics for all files with prefix of
file_name. Default is False
out_format[in] output format to print file statistics. Default is GRID.
"""
def _get_file_stats(path, file_name):
stats = os.stat(os.path.join(path, file_name))
return ((file_name, stats.st_size, time.ctime(stats.st_ctime),
time.ctime(stats.st_mtime)))
columns = ["File", "Size", "Created", "Last Modified"]
rows = []
path, filename = os.path.split(file_name)
if wild:
for root, dirs, files in os.walk(path):
for f in files:
if f.startswith(filename):
rows.append(_get_file_stats(path, f))
else:
rows.append(_get_file_stats(path, filename))
print_list(sys.stdout, out_format, columns, rows)
def remote_copy(filepath, user, host, local_path, verbosity=0):
"""Copy a file from a remote machine to the localhost.
filepath[in] The full path and file name of the file on the remote
machine
    user[in]           Remote login
    host[in]           Remote hostname or IP address
    local_path[in]     The path to where the file is to be copied
    verbosity[in]      Verbosity level. Default is 0.
    Returns bool  - True = success, False = failure or exception
"""
if os.name == "posix": # use scp
run_cmd = "scp %s@%s:%s %s" % (user, host, filepath, local_path)
if verbosity > 1:
print("# Command =%s" % run_cmd)
print("# Copying file from %s:%s to %s:" % (host, filepath, local_path))
proc = subprocess.Popen(run_cmd, shell=True)
ret_val = proc.wait()
else:
print("Remote copy not supported. Please use UNC paths and omit "
"the --remote-login option to use a local copy operation.")
return True
def check_python_version(min_version=PYTHON_MIN_VERSION,
max_version=PYTHON_MAX_VERSION,
raise_exception_on_fail=False,
name=None):
"""Check the Python version compatibility.
By default this method uses constants to define the minimum and maximum
Python versions required. It's possible to override this by passing new
values on ``min_version`` and ``max_version`` parameters.
It will run a ``sys.exit`` or raise a ``UtilError`` if the version of
Python detected it not compatible.
min_version[in] Tuple with the minimum Python version
required (inclusive).
max_version[in] Tuple with the maximum Python version
required (exclusive).
raise_exception_on_fail[in] Boolean, it will raise a ``UtilError`` if
True and Python detected is not compatible.
name[in] String for a custom name, if not provided
will get the module name from where this
function was called.
"""
# Only use the fields: major, minor and micro
sys_version = sys.version_info[:3]
# Test min version compatibility
is_compat = min_version <= sys_version
# Test max version compatibility if it's defined
if is_compat and max_version:
is_compat = sys_version < max_version
if not is_compat:
if not name:
# Get the utility name by finding the module
# name from where this function was called
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
mod_name, ext = os.path.basename(mod.__file__).split('.')
name = '%s utility' % mod_name
# Build the error message
if max_version:
max_version_error_msg = 'or higher and lower than %s' % \
'.'.join(map(str, max_version))
else:
max_version_error_msg = 'or higher'
error_msg = (
'The %(name)s requires Python version %(min_version)s '
'%(max_version_error_msg)s. The version of Python detected was '
'%(sys_version)s. You may need to install or redirect the '
'execution of this utility to an environment that includes a '
'compatible Python version.'
) % {
'name': name,
'sys_version': '.'.join(map(str, sys_version)),
'min_version': '.'.join(map(str, min_version)),
'max_version_error_msg': max_version_error_msg
}
if raise_exception_on_fail:
raise UtilError(error_msg)
print('ERROR: %s' % error_msg)
sys.exit(1)
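# Illustrative sketch (not part of the original module): require a Python
# interpreter in the [2.6, 3.0) range and raise instead of exiting. The
# utility name passed here only labels the example's error message.
def _example_enforce_python_26():
    check_python_version(min_version=(2, 6, 0),
                         max_version=(3, 0, 0),
                         raise_exception_on_fail=True,
                         name="example utility")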
| gpl-2.0 | -1,920,105,852,500,991,700 | 34.014164 | 80 | 0.589401 | false | 4.029997 | false | false | false |
pearu/sympycore | sympycore/heads/base_exp_dict.py | 1 | 11228 |
__all__ = ['BASE_EXP_DICT']
from .base import heads, heads_precedence, ArithmeticHead
from ..core import init_module, Expr
init_module.import_heads()
init_module.import_numbers()
init_module.import_lowlevel_operations()
class BaseExpDictHead(ArithmeticHead):
""" BASE_EXP_DICT expression data is a dictionary of base and
exponent pairs. All base parts must be Expr instances.
    For example, ``Algebra(BASE_EXP_DICT, {x:2, y:a, 3:1, 2:1/2})``
represents ``3 * 2**(1/2) * x**2 * y**a``.
"""
def is_data_ok(self, cls, data):
if type(data) is dict:
for item in data.iteritems():
msg = POW.is_data_ok(cls, item)
if msg:
return 'POW data=%s: %s' % (item, msg)
b, e = item
if b.head is POW:
return 'BASE_EXP_DICT key cannot be POW'
else:
return 'data must be dict instance but got %s' % (type(data))
return
def __repr__(self): return 'BASE_EXP_DICT'
def data_to_str_and_precedence(self, cls, base_exp_dict):
factors = []
coeff = None
for base, exp in base_exp_dict.items():
if exp==1 and base.head is NUMBER:
coeff = base.data
else:
factors.append(cls(POW, (base, exp)))
if coeff is not None:
return TERM_COEFF.data_to_str_and_precedence(cls, (cls(MUL, factors), coeff))
return MUL.data_to_str_and_precedence(cls, factors)
def reevaluate(self, cls, data):
r = cls(NUMBER, 1)
for base, exp in data.iteritems():
r *= base ** exp
return r
def to_ADD(self, Algebra, base_exp_dict, expr):
return Algebra(ADD, [expr])
def term_coeff(self, cls, expr):
data = expr.data
coeff = base_exp_dict_get_coefficient(cls, data)
if coeff is not None:
data = data.copy()
del data[coeff]
r = base_exp_dict_new(cls, data)
t, c = r.head.term_coeff(cls, r)
return t, c * coeff
return expr, 1
def new(self, cls, base_exp_dict, evaluate=True):
return base_exp_dict_new(cls, base_exp_dict)
def neg(self, cls, expr):
data = expr.data
coeff = base_exp_dict_get_coefficient(cls, data)
if coeff is None:
return cls(TERM_COEFF, (expr, -1))
data = data.copy()
del data[coeff]
return term_coeff_new(cls, (base_exp_dict_new(cls, data), -coeff))
def inplace_commutative_data_mul(self, cls, data, rhs):
"""
Multiply base-exp-dictionary with rhs inplace.
"""
rhead, rdata = rhs.pair
if rhead is SYMBOL or rhead is ADD or rhead is APPLY or rhead is DIFF or rhead is FDIFF:
base_exp_dict_add_item(cls, data, rhs, 1)
elif rhead is NUMBER:
base_exp_dict_add_item(cls, data, rhs, 1)
elif rhead is TERM_COEFF:
term, coeff = rdata
base_exp_dict_add_item(cls, data, term, 1)
base_exp_dict_add_item(cls, data, cls(NUMBER, coeff), 1)
elif rhead is BASE_EXP_DICT:
base_exp_dict_add_dict(cls, data, rdata)
elif rhead is POW:
base, exp = rdata
base_exp_dict_add_item(cls, data, base, exp)
elif rhead is TERM_COEFF_DICT:
base_exp_dict_add_item(cls, data, rhs, 1)
else:
            raise NotImplementedError(repr((self, cls, rhs.pair)))
def commutative_mul(self, cls, lhs, rhs):
data = lhs.data.copy()
self.inplace_commutative_data_mul(cls, data, rhs)
return base_exp_dict_new(cls, data)
def commutative_mul_number(self, cls, lhs, rhs):
return term_coeff_new(cls, (lhs, rhs))
def commutative_div_number(self, cls, lhs, rhs):
r = number_div(cls, 1, rhs)
if rhs==0:
return r * lhs
return term_coeff_new(cls, (lhs, r))
def commutative_div(self, cls, lhs, rhs):
rhead, rdata = rhs.pair
if rhead is NUMBER:
return self.commutative_div_number(cls, lhs, rdata)
if rhead is POW:
data = lhs.data.copy()
base, exp = rdata
base_exp_dict_sub_item(cls, data, base, exp)
return base_exp_dict_new(cls, data)
if rhead is BASE_EXP_DICT:
data = lhs.data.copy()
base_exp_dict_sub_dict(cls, data, rdata)
return base_exp_dict_new(cls, data)
if rhead is SYMBOL or rhead is TERM_COEFF_DICT or rhead is APPLY:
data = lhs.data.copy()
base_exp_dict_sub_item(cls, data, rhs, 1)
return base_exp_dict_new(cls, data)
if rhead is TERM_COEFF:
term, coeff = rhs.term_coeff()
return (lhs / term) / coeff
return ArithmeticHead.commutative_div(self, cls, lhs, rhs)
def commutative_rdiv_number(self, cls, lhs, rhs):
data = lhs.data.copy()
base_exp_dict_mul_value(cls, data, -1)
return base_exp_dict_new(cls, data) * rhs
def scan(self, proc, cls, data, target):
for b, e in data.iteritems():
b.head.scan(proc, cls, b.data, target)
if isinstance(e, Expr):
e.head.scan(proc, cls, e.data, target)
else:
NUMBER.scan(proc, cls, e, target)
proc(cls, self, data, target)
def walk(self, func, cls, data, target):
d = {}
flag = False
for b, e in data.iteritems():
b1 = b.head.walk(func, cls, b.data, b)
if isinstance(e, Expr):
e1 = e.head.walk(func, cls, e.data, e)
else:
e1 = NUMBER.walk(func, cls, e, e)
if b1 is not b or e1 is not e:
flag = True
self.inplace_commutative_data_mul(cls, d, b1**e1)
if flag:
r = base_exp_dict_new(cls, d)
return func(cls, r.head, r.data, r)
return func(cls, self, data, target)
def pow(self, cls, base, exp):
if type(exp) is cls:
h, d = exp.pair
if h is NUMBER and isinstance(d, numbertypes):
exp = d
if isinstance(exp, inttypes):
if exp:
data = base.data.copy()
base_exp_dict_mul_value(cls, data, exp)
return base_exp_dict_new(cls, data)
return cls(NUMBER, 1)
return pow_new(cls, (base, exp))
pow_number = pow
def expand(self, cls, expr):
data = {}
for b, e in expr.data.items():
f = pow_new(cls, (b, e)).expand()
h, d = f.pair
data1 = {}
if h is TERM_COEFF_DICT:
data2 = d
else:
t, c = f.term_coeff()
data2 = {t: c}
if data:
term_coeff_dict_mul_dict(cls, data1, data, data2)
data = data1
else:
data = data2
return term_coeff_dict_new(cls, data)
def diff(self, cls, data, expr, symbol, order, cache={}):
key = (expr, symbol, order)
result = cache.get(key)
if result is not None:
return result
key1 = (expr, symbol, 1)
result = cache.get(key1)
if result is None:
operands = data.items()
zero = cls(NUMBER, 0)
result = zero
for i in range(len(operands)):
p = pow_new(cls, operands[i])
d = p.head.diff(cls, p.data, p, symbol, 1, cache=cache)
if d==zero:
continue
be_dict = data.copy()
del be_dict[operands[i][0]]
r = base_exp_dict_new(cls, be_dict)
result += r * d
cache[key1] = result
if order>1:
result = result.head.diff(cls, result.data, result, symbol, order-1, cache=cache)
cache[key] = result
return result
def apply(self, cls, data, func, args):
result = cls(NUMBER, 1)
for base, exp in data.iteritems():
if isinstance(exp, Expr):
return NotImplemented
result *= base.head.apply(cls, base.data, base, args) ** exp
return result
def integrate_indefinite(self, cls, data, expr, x):
d1 = {} # f(x)**g(x)
d2 = {} # f(x)**const
d3 = {} # const**g(x)
d4 = {} # const**const
for base, exp in data.iteritems():
if x in base.symbols_data:
if type(exp) is cls and x in exp.symbols_data:
d1[base] = exp
else:
d2[base] = exp
elif type(exp) is cls and x in exp.symbols_data:
d3[base] = exp
else:
d4[base] = exp
if d1 or (d2 and d3) or (len(d2)>1) or (len(d3)>1):
raise NotImplementedError("don't know how to integrate %s over %s" % (expr, x))
if not (d2 or d3):
return expr * cls(SYMBOL, x)
if d4:
if len(d4)>1:
const = cls(BASE_EXP_DICT, d4)
else:
const = pow_new(cls, dict_get_item(d4))
else:
const = 1
if d2:
newexpr = pow_new(cls, dict_get_item(d2))
return newexpr.head.integrate_indefinite(cls, newexpr.data, newexpr, x) * const
if d3:
newexpr = pow_new(cls, dict_get_item(d3))
return newexpr.head.integrate_indefinite(cls, newexpr.data, newexpr, x) * const
raise NotImplementedError("don't know how to integrate %s over %s" % (expr, x))
def integrate_definite(self, cls, data, expr, x, a, b):
d1 = {} # f(x)**g(x)
d2 = {} # f(x)**const
d3 = {} # const**g(x)
d4 = {} # const**const
for base, exp in data.iteritems():
if x in base.symbols_data:
if type(exp) is cls and x in exp.symbols_data:
d1[base] = exp
else:
d2[base] = exp
elif type(exp) is cls and x in exp.symbols_data:
d3[base] = exp
else:
d4[base] = exp
if d1 or (d2 and d3) or (len(d2)>1) or (len(d3)>1):
raise NotImplementedError("don't know how to integrate %s over %s in [%s, %s]" % (expr, x, a, b))
if not (d2 or d3):
return (b-a) * cls(SYMBOL, x)
if d4:
if len(d4)>1:
const = cls(BASE_EXP_DICT, d4)
else:
const = pow_new(cls, dict_get_item(d4))
else:
const = 1
if d2:
newexpr = pow_new(cls, dict_get_item(d2))
return newexpr.head.integrate_definite(cls, newexpr.data, newexpr, x, a, b) * const
if d3:
newexpr = pow_new(cls, dict_get_item(d3))
return newexpr.head.integrate_definite(cls, newexpr.data, newexpr, x, a, b) * const
raise NotImplementedError("don't know how to integrate %s over %s in [%s, %s]" % (expr, x, a, b))
BASE_EXP_DICT = BaseExpDictHead()
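# Illustrative sketch (not part of sympycore): the BASE_EXP_DICT layout in
# plain-Python terms. It renders an ordinary {base: exponent} dict the way the
# head above interprets it (a product of base**exponent factors); it does not
# use the Expr/Algebra machinery, so it is only a conceptual aid.
def _example_render_base_exp_dict(base_exp_dict):
    factors = []
    for base, exp in sorted(base_exp_dict.items(), key=lambda item: str(item[0])):
        factors.append(str(base) if exp == 1 else '%s**%s' % (base, exp))
    return ' * '.join(factors)
# _example_render_base_exp_dict({'x': 2, 'y': 'a', 3: 1}) == '3 * x**2 * y**a'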
| bsd-3-clause | 5,856,890,903,405,124,000 | 35.813115 | 109 | 0.512647 | false | 3.48263 | false | false | false |
hugovk/terroroftinytown | terroroftinytown/tracker/base.py | 1 | 1068 | # encoding=utf-8
import tornado.web
from terroroftinytown.tracker.model import User
ACCOUNT_COOKIE_NAME = 'tottu'
ACCOUNT_TOKEN_COOKIE_NAME = 'tottt'
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
username_raw = self.get_secure_cookie(ACCOUNT_COOKIE_NAME)
token = self.get_secure_cookie(ACCOUNT_TOKEN_COOKIE_NAME)
if username_raw and token:
username = username_raw.decode('ascii')
if username and User.check_account_session(username, token):
return username
def prepare(self):
if self.application.is_maintenance_in_progress():
self._show_maintenance_page()
def _show_maintenance_page(self):
self.set_status(512, 'EXPORTING OUR SHIT')
self.render('maintenance.html')
raise tornado.web.Finish()
def user_audit_text(self, text):
return '[{username} - {ip_address}] {text}'.format(
username=self.current_user,
ip_address=self.request.remote_ip,
text=text,
)
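# Illustrative sketch (not part of the tracker): a handler built on BaseHandler
# that reuses the cookie-based login check and the audit-text helper above.
# The URL it would be routed from and the plain-text response are assumptions
# made for this example.
class ExampleAuditedHandler(BaseHandler):
    def get(self):
        if not self.current_user:
            self.redirect('/login')
            return
        # Record who made the request, then answer with the audit line.
        self.write(self.user_audit_text('viewed the example page'))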
| mit | -4,946,636,474,839,554,000 | 28.666667 | 72 | 0.640449 | false | 3.800712 | false | false | false |
yuwen41200/biodiversity-analysis | src/controller/main_action.py | 1 | 10383 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from dateutil.parser import parse
from pprint import pformat
from traceback import format_exc
from model.dataset import Dataset
from model.species import Species
from model.leaflet_map import LeafletMap
from model.scatter_plot import ScatterPlot
from view.spatial_analysis_widget import SpatialAnalysisWidget
from view.temporal_analysis_widget import TemporalAnalysisWidget
from view.cooccurrence_analysis_widget import CooccurrenceAnalysisWidget
from view.set_filters_dialog import SetFiltersDialog
from view.add_species_dialog import AddSpeciesDialog
from controller.correlation_table import CorrelationTable
from controller.cooccurrence_calculation import CooccurrenceCalculation
from lib.dataset_processor import DatasetProcessor
# noinspection PyPep8Naming
class MainAction:
def __init__(self, dataset, mainWindow, process, pipe):
"""
Initialize the controller for the main window.
:param dataset: Dataset model.
:param mainWindow: MainWindow view.
:param process: Worker subprocess.
:param pipe: Message pipe for the worker subprocess.
"""
self.spatialData = dataset.spatialData
self.temporalData = dataset.temporalData
self.auxiliaryData = dataset.auxiliaryData
self.selectedSpecies = dataset.selectedSpecies
self.map = LeafletMap(dataset, "Landscape")
self.plot = ScatterPlot(dataset)
spatial = SpatialAnalysisWidget(self.map.webView)
temporal = TemporalAnalysisWidget(self.plot.mplCanvas)
cooccurrence = CooccurrenceAnalysisWidget()
self.correlationTable = CorrelationTable(dataset, spatial, temporal)
self.cooccurrenceCalculation = CooccurrenceCalculation(
dataset, cooccurrence, process, pipe
)
self.mainWindow = mainWindow
self.mainWindow.setupWidgets(
spatial, temporal, cooccurrence, self, self.cooccurrenceCalculation
)
self.mainWindow.show()
# noinspection PyCallByClass, PyTypeChecker, PyArgumentList, PyBroadException
def importData(self):
"""
Import data from a Darwin Core Archive (DwC-A) file. |br|
Store them in ``Dataset``.
:return: None.
"""
if self.spatialData:
title = "Dataset Already Imported"
content = "To import new data, please clear data first."
self.mainWindow.alert(title, content, 3)
return
title, extension = "Select a DwC-A File", "DwC-A File (*.zip)"
filename = self.mainWindow.openFile(title, extension)
if filename:
try:
archiveData, archiveMeta = DatasetProcessor.extractDarwinCoreArchive(filename)
if archiveMeta["coreType"] not in Dataset.supportedCores:
title = "Unsupported DwC Type"
content = (
"The provided file has core type of " + archiveMeta["coreType"] + ".\n"
"This program only support " + ", ".join(Dataset.supportedCores) + "."
)
self.mainWindow.alert(title, content, 3)
return
columns = [
("individualCount", True),
("eventDate", True),
("decimalLatitude", True),
("decimalLongitude", True),
("scientificName", True),
("vernacularName", False)
]
try:
dataList = DatasetProcessor.extractCsv(archiveData, archiveMeta, columns)
except ValueError as e:
title = "Invalid DwC-A File"
content = str(e) + "\nPlease select a DwC-A file with such field."
self.mainWindow.alert(title, content, 3)
return
except:
title = "Invalid DwC-A File"
content = (
"The provided file is either not in DwC-A format or corrupted.\n"
"Please select a valid one.\n\n"
)
self.mainWindow.alert(title, content + format_exc(), 3)
return
for r in dataList:
try:
r0int = int(r[0])
r1datetime = parse(r[1])
r2float = float(r[2])
r3float = float(r[3])
if not r[4]:
raise ValueError("Field \"scientificName\" is empty.")
except:
title = "Invalid Record Found"
content = "The following record is invalid and will be ignored:\n"
self.mainWindow.alert(title, content + pformat(r), 2)
else:
self.spatialData[r[4]] = ((r2float, r3float), r0int)
self.temporalData[r[4]] = (r1datetime, r0int)
self.auxiliaryData[r[4]] = r[5]
title = "Dataset Successfully Imported"
content = "{:,d} records have been loaded.".format(len(dataList))
self.mainWindow.alert(title, content, 0)
# noinspection PyTypeChecker
def setFilters(self):
"""
Only leave filtered data in ``Dataset``.
:return: None.
"""
if not self.spatialData:
title, content = "Empty Dataset", "Please import data first."
self.mainWindow.alert(title, content, 3)
else:
xCoordinates = [n[0][1] for m in self.spatialData.values() for n in m]
yCoordinates = [n[0][0] for m in self.spatialData.values() for n in m]
timestamps = [n[0] for m in self.temporalData.values() for n in m]
xCoordinateMinMax = (min(xCoordinates), max(xCoordinates))
yCoordinateMinMax = (min(yCoordinates), max(yCoordinates))
timestampMinMax = (min(timestamps), max(timestamps))
dialog = SetFiltersDialog(xCoordinateMinMax, yCoordinateMinMax, timestampMinMax)
dialog.exec_()
if not dialog.xCoordinateMinMax:
return
for k in list(self.spatialData.keys()):
for i, u in enumerate(self.spatialData[k]):
v = self.temporalData[k][i]
if (
dialog.xCoordinateMinMax[0] <= u[0][1] <= dialog.xCoordinateMinMax[1] and
dialog.yCoordinateMinMax[0] <= u[0][0] <= dialog.yCoordinateMinMax[1] and
dialog.timestampMinMax[0] <= v[0] <= dialog.timestampMinMax[1]
):
break
else:
if k in self.selectedSpecies:
self.removeSpecies(k + " " + self.auxiliaryData[k])
del self.spatialData[k]
del self.temporalData[k]
del self.auxiliaryData[k]
self.cooccurrenceCalculation.halt()
self.plot.resetCache()
length = len([n for m in self.spatialData.values() for n in m])
title = "Filter Result"
content = "{:,d} records matches the specified range.".format(length)
self.mainWindow.alert(title, content, 0)
# noinspection PyCallByClass, PyTypeChecker, PyArgumentList
def addSpecies(self):
"""
Select a species from ``Dataset.spatialData``, append it to ``Dataset.selectedSpecies``.
:return: None.
"""
if not self.spatialData:
title, content = "Empty Dataset", "Please import data first."
self.mainWindow.alert(title, content, 3)
elif not Species.available():
title = "Too Many Species"
content = ("Selecting more than " + str(Species.nColor) +
" species is not supported.")
self.mainWindow.alert(title, content, 3)
else:
species = [(k, self.auxiliaryData[k]) for k in self.spatialData.keys()
if k not in self.selectedSpecies]
dialog = AddSpeciesDialog(species)
dialog.exec_()
if dialog.newSpecies:
newSpecies, vernacularName = dialog.newSpecies
self.selectedSpecies[newSpecies] = Species()
newColor = self.selectedSpecies[newSpecies].color
self.mainWindow.addSpeciesToLayout(newSpecies, vernacularName, newColor)
self.map.add(newSpecies)
self.map.refresh()
self.plot.rebuild()
self.correlationTable.add(newSpecies)
def removeSpecies(self, oldSpecies):
"""
Remove the specified species from ``Dataset.selectedSpecies``.
:param oldSpecies: Name of the old species to be removed.
:return: None.
"""
oldSpeciesShort = oldSpecies
for k in self.selectedSpecies.keys():
if oldSpecies.startswith(k):
oldSpeciesShort = k
del self.selectedSpecies[k]
break
self.mainWindow.removeSpeciesFromLayout(oldSpecies)
self.map.remove()
self.map.refresh()
self.plot.rebuild()
self.correlationTable.remove(oldSpeciesShort)
def clearData(self):
"""
Clear ``Dataset``.
:return: None.
"""
if not self.spatialData:
title, content = "Empty Dataset", "Please import data first."
self.mainWindow.alert(title, content, 3)
else:
self.spatialData.clear()
self.temporalData.clear()
self.auxiliaryData.clear()
self.selectedSpecies.clear()
self.mainWindow.removeSpeciesFromLayout()
self.map.rebuild()
self.map.refresh()
self.plot.resetCache()
self.plot.rebuild()
self.correlationTable.remove()
self.cooccurrenceCalculation.halt()
# noinspection PyCallByClass, PyTypeChecker, PyArgumentList
def about(self):
"""
Show information about this program.
:return: None.
"""
title = "About Biodiversity Analysis"
content = Dataset.license()
self.mainWindow.alert(title, content, 4)
| gpl-3.0 | -5,330,190,177,571,852,000 | 36.756364 | 97 | 0.571896 | false | 4.442875 | false | false | false |
brain-research/mirage-rl-bpttv | baselines/acktr/run_atari.py | 1 | 1510 | #!/usr/bin/env python
import os, logging, gym
from baselines import logger
from baselines.common import set_global_seeds
from baselines import bench
from baselines.acktr.acktr_disc import learn
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.acktr.policies import CnnPolicy
def train(env_id, num_timesteps, seed, num_cpu):
def make_env(rank):
def _thunk():
env = make_atari(env_id)
env.seed(seed + rank)
env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
gym.logger.setLevel(logging.WARN)
return wrap_deepmind(env)
return _thunk
set_global_seeds(seed)
env = SubprocVecEnv([make_env(i) for i in range(num_cpu)])
policy_fn = CnnPolicy
learn(policy_fn, env, seed, total_timesteps=int(num_timesteps * 1.1), nprocs=num_cpu)
env.close()
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(10e6))
args = parser.parse_args()
logger.configure()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed, num_cpu=32)
if __name__ == '__main__':
main()
| mit | 1,472,891,303,127,896,000 | 38.736842 | 100 | 0.691391 | false | 3.275488 | false | false | false |
dwavesystems/dimod | dimod/core/sampler.py | 1 | 9510 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The :class:`.Sampler` abstract base class (see :mod:`abc`) helps you create new
dimod samplers.
Any new dimod sampler must define a subclass of :class:`.Sampler` that implements
abstract properties :attr:`~.Sampler.parameters` and :attr:`~.Sampler.properties`
and one of the abstract methods :meth:`~.Sampler.sample`, :meth:`~.Sampler.sample_ising`,
or :meth:`~.Sampler.sample_qubo`. The :class:`.Sampler` class provides the complementary
methods as mixins and ensures consistent responses.
Implemented sample methods must accept, and warn on, unknown keyword arguments
`**kwargs`. This means that all implemented sample methods must have the
`**kwargs` parameter. :meth:`~.Sampler.remove_unknown_kwargs` is a convenience
method provided for this purpose.
For example, the following steps show how to easily create a dimod sampler. It is
sufficient to implement a single method (in this example the :meth:`sample_ising` method)
to create a dimod sampler with the :class:`.Sampler` class.
.. testcode::
class LinearIsingSampler(dimod.Sampler):
def sample_ising(self, h, J, **kwargs):
kwargs = self.remove_unknown_kwargs(**kwargs)
sample = linear_ising(h, J)
energy = dimod.ising_energy(sample, h, J)
return dimod.SampleSet.from_samples([sample], vartype='SPIN', energy=[energy])
@property
def properties(self):
return dict()
@property
def parameters(self):
return dict()
For this example, the implemented sampler :meth:`~.Sampler.sample_ising` can be based on
a simple placeholder function, which returns a sample that minimizes the linear terms:
.. testcode::
def linear_ising(h, J):
sample = {}
for v in h:
if h[v] < 0:
sample[v] = +1
else:
sample[v] = -1
return sample
The :class:`.Sampler` ABC provides the other sample methods "for free"
as mixins.
>>> sampler = LinearIsingSampler()
...
... # Implemented by class LinearIsingSampler:
>>> response = sampler.sample_ising({'a': -1}, {})
...
... # Mixins provided by Sampler class:
>>> response = sampler.sample_qubo({('a', 'a'): 1})
>>> response = sampler.sample(dimod.BinaryQuadraticModel.from_ising({'a': -1}, {}))
Below is a more complex version of the same sampler, where the :attr:`properties` and
:attr:`parameters` properties return non-empty dicts.
.. testcode::
class FancyLinearIsingSampler(dimod.Sampler):
def __init__(self):
self._properties = {'description': 'a simple sampler that only considers the linear terms'}
self._parameters = {'verbose': []}
def sample_ising(self, h, J, verbose=False, **kwargs):
kwargs = self.remove_unknown_kwargs(**kwargs)
sample = linear_ising(h, J)
energy = dimod.ising_energy(sample, h, J)
if verbose:
print(sample)
return dimod.SampleSet.from_samples([sample], energy=[energy])
@property
def properties(self):
return self._properties
@property
def parameters(self):
return self._parameters
"""
import abc
import warnings
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.exceptions import InvalidSampler, SamplerUnknownArgWarning
from dimod.meta import SamplerABCMeta, samplemixinmethod
from dimod.vartypes import Vartype
__all__ = ['Sampler']
class Sampler(metaclass=SamplerABCMeta):
"""Abstract base class for dimod samplers.
Provides all methods :meth:`~.Sampler.sample`, :meth:`~.Sampler.sample_ising`,
:meth:`~.Sampler.sample_qubo` assuming at least one is implemented.
Also includes utility method :meth:`~.Sampler.remove_unknown_kwargs`, which
may be used in sample methods to handle unknown kwargs.
"""
@abc.abstractproperty # for python2 compatibility
def parameters(self):
"""dict: A dict where keys are the keyword parameters accepted by the sampler
        methods and values are lists of the properties relevant to each parameter.
"""
pass
@abc.abstractproperty # for python2 compatibility
def properties(self):
"""dict: A dict containing any additional information about the sampler.
"""
pass
@samplemixinmethod
def sample(self, bqm, **parameters):
"""Sample from a binary quadratic model.
This method is inherited from the :class:`.Sampler` base class.
Converts the binary quadratic model to either Ising or QUBO format and
then invokes an implemented sampling method (one of
:meth:`.sample_ising` or :meth:`.sample_qubo`).
Args:
:obj:`.BinaryQuadraticModel`:
A binary quadratic model.
**kwargs:
See the implemented sampling for additional keyword definitions.
Unknown keywords are accepted but a warning will be raised.
Returns:
:obj:`.SampleSet`
See also:
:meth:`.sample_ising`, :meth:`.sample_qubo`
"""
# we try to use the matching sample method if possible
if bqm.vartype is Vartype.SPIN:
if not getattr(self.sample_ising, '__issamplemixin__', False):
# sample_ising is implemented
h, J, offset = bqm.to_ising()
sampleset = self.sample_ising(h, J, **parameters)
else:
Q, offset = bqm.to_qubo()
sampleset = self.sample_qubo(Q, **parameters)
elif bqm.vartype is Vartype.BINARY:
if not getattr(self.sample_qubo, '__issamplemixin__', False):
# sample_qubo is implemented
Q, offset = bqm.to_qubo()
sampleset = self.sample_qubo(Q, **parameters)
else:
h, J, offset = bqm.to_ising()
sampleset = self.sample_ising(h, J, **parameters)
else:
raise RuntimeError("binary quadratic model has an unknown vartype")
# if the vartype already matches this will just adjust the offset
return sampleset.change_vartype(bqm.vartype, energy_offset=offset)
@samplemixinmethod
def sample_ising(self, h, J, **parameters):
"""Sample from an Ising model using the implemented sample method.
This method is inherited from the :class:`.Sampler` base class.
Converts the Ising model into a :obj:`.BinaryQuadraticModel` and then
calls :meth:`.sample`.
Args:
h (dict/list):
Linear biases of the Ising problem. If a dict, should be of the
form `{v: bias, ...}` where is a spin-valued variable and `bias`
is its associated bias. If a list, it is treated as a list of
biases where the indices are the variable labels.
J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
**kwargs:
See the implemented sampling for additional keyword definitions.
Returns:
:obj:`.SampleSet`
See also:
:meth:`.sample`, :meth:`.sample_qubo`
"""
bqm = BinaryQuadraticModel.from_ising(h, J)
return self.sample(bqm, **parameters)
@samplemixinmethod
def sample_qubo(self, Q, **parameters):
"""Sample from a QUBO using the implemented sample method.
This method is inherited from the :class:`.Sampler` base class.
Converts the QUBO into a :obj:`.BinaryQuadraticModel` and then
calls :meth:`.sample`.
Args:
Q (dict):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) problem. Should be a dict of the form `{(u, v): bias, ...}`
where `u`, `v`, are binary-valued variables and `bias` is their
associated coefficient.
**kwargs:
See the implemented sampling for additional keyword definitions.
Returns:
:obj:`.SampleSet`
See also:
:meth:`.sample`, :meth:`.sample_ising`
"""
bqm = BinaryQuadraticModel.from_qubo(Q)
return self.sample(bqm, **parameters)
def remove_unknown_kwargs(self, **kwargs):
"""Check that all `kwargs` are accepted by the sampler. If a
keyword is unknown, a warning is raised and the argument is removed.
Args:
**kwargs:
Keyword arguments to be validated.
Returns:
dict: Updated `kwargs`
"""
for kw in [k for k in kwargs if k not in self.parameters]:
msg = "Ignoring unknown kwarg: {!r}".format(kw)
warnings.warn(msg, SamplerUnknownArgWarning, stacklevel=3)
kwargs.pop(kw)
return kwargs
| apache-2.0 | -2,800,460,866,468,964,000 | 34.485075 | 103 | 0.625973 | false | 4.307065 | false | false | false |
daanwierstra/pybrain | pybrain/rl/tasks/pomdp/pomdp.py | 1 | 1502 | __author__ = 'Tom Schaul, [email protected]'
from scipy import ndarray
from pybrain.rl.tasks import EpisodicTask
from pybrain.utilities import Named, drawIndex
class POMDPTask(EpisodicTask, Named):
""" Partially observable episodic MDP (with discrete actions)
Has actions that can be performed, and observations in every state.
By default, the observation is a vector, and the actions are integers.
"""
# number of observations
observations = 4
# number of possible actions
actions = 4
# maximal number of steps before the episode is stopped
maxSteps = None
# the lower bound on the reward value
minReward = 0
def __init__(self, **args):
self.setArgs(**args)
self.steps = 0
@property
def indim(self):
return self.actions
@property
def outdim(self):
return self.observations
def reset(self):
self.steps = 0
EpisodicTask.reset(self)
def isFinished(self):
        if self.maxSteps is not None:
return self.steps >= self.maxSteps
return False
def performAction(self, action):
""" POMDP tasks, as they have discrete actions, can me used by providing either an index,
or an array with a 1-in-n coding (which can be stochastic). """
if type(action) == ndarray:
action = drawIndex(action, tolerant = True)
self.steps += 1
EpisodicTask.performAction(self, action) | bsd-3-clause | -4,077,918,592,508,438,500 | 27.358491 | 97 | 0.629161 | false | 4.137741 | false | false | false |
starcroce/PyAlgoDataStructure | linked_list/linked_list_to_queue.py | 1 | 1314 | import MyDoubleLinkedList
class DoubleLinkedListQueue:
def __init__(self):
self.front = None
self.rear = None
self.content = None
def push(self, val):
# push to empty queue
if self.content is None:
self.content = MyDoubleLinkedList.LinkedList(val)
self.front = self.content.head
self.rear = self.content.tail
else:
self.content.insert_before(self.content.head, val)
self.front = self.content.head
def pop(self):
        if self.is_empty():
print 'Pop from empty queue'
return
self.content.remove(self.rear)
self.rear = self.content.tail
def is_empty(self):
return self.content is None
def print_queue(self):
if self.is_empty():
print 'None'
else:
curr = self.front
while curr != self.rear:
print str(curr.val) + ' ->',
curr = curr.next
print str(curr.val)
def main():
my_queue = DoubleLinkedListStack()
my_queue.print_queue()
for i in range(10):
my_queue.push(i)
my_queue.print_queue()
for i in range(5):
my_queue.pop()
my_queue.print_queue()
if __name__ == '__main__':
main()
| gpl-2.0 | 7,239,916,938,824,079,000 | 22.464286 | 62 | 0.541857 | false | 3.808696 | false | false | false |
aditigupta96/DealBazaar | welcome.py | 1 | 26070 | import os
import couchdb
import uuid
import requests
from datetime import datetime
from flask import Flask, jsonify, session, render_template, request, redirect, g, url_for, flash
# from .models import User
from datetime import datetime
from couchdb.mapping import Document, TextField, DateTimeField, ListField, FloatField, IntegerField, ViewField
from werkzeug.utils import secure_filename
from werkzeug import FileStorage
from flask_uploads import (UploadSet, configure_uploads, IMAGES, UploadNotAllowed)
# from cloudant.view import View
from tokens import generate_confirmation_token, confirm_token
from flask_mail import Mail
from emails import send_email
# UPLOADED_PHOTOS_DEST = 'uploads'
GOOGLE_GEOCODE_URL = 'https://maps.googleapis.com/maps/api/geocode/json?place_id={0}&key={1}'
GOOGLE_API_KEY = 'AIzaSyDVE9osSCgxkIPp4LGEp1xwhmGrMVxNpnc'
GOOGLE_DISTANCE_URL = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0},{1}&destinations={2},{3}&key={4}'
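# Illustrative sketch (not part of the app): the shape of the Google Geocoding
# response that User.calculate_geocode() parses further below. Field names
# follow the documented API format; the coordinate values are made up.
_EXAMPLE_GEOCODE_RESPONSE = {
    "results": [
        {"geometry": {"location": {"lat": 28.6139, "lng": 77.2090}}}
    ]
}
# calculate_geocode() reads results[0]['geometry']['location']['lat'] / ['lng'].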
cloudant_data = {
"username": "052ca863-0f20-49a8-9813-330b0813683a-bluemix",
"password": "68e8bdaa4739229b83095bf31b9c8256d5790022a184e8cdfefec270ea2be740",
"host": "052ca863-0f20-49a8-9813-330b0813683a-bluemix.cloudant.com",
"port": '443',
}
DATABASE_URL = "https://052ca863-0f20-49a8-9813-330b0813683a-bluemix.cloudant.com/bazaardata/"
app = Flask(__name__)
app.config.from_object(__name__)
# app.config.from_envvar('DEALBAZAAR_SETTINGS', silent=True)
app.secret_key = os.urandom(24)
mail = Mail(app)
app.config.update(
DEBUG = True,
SECURITY_PASSWORD_SALT = 'random',
BCRYPT_LOG_ROUNDS = 13,
MAIL_SERVER = 'smtp.gmail.com',
MAIL_PORT = 587,
MAIL_USE_TLS = True,
MAIL_USE_SSL = False,
MAIL_USERNAME = os.environ['DEALBAZAAR_USERNAME'],
MAIL_PASSWORD = os.environ['DEALBAZAAR_PASSWORD'],
MAIL_DEFAULT_SENDER = '[email protected]'
)
mail = Mail(app)
# uploaded_photos = UploadSet('photos', IMAGES)
# configure_uploads(app, uploaded_photos)
class User(Document):
doc_type = 'user'
name = TextField()
email = TextField()
password = TextField()
contact = IntegerField()
college = TextField()
city = TextField()
address = TextField()
confirmed = IntegerField(default=0)
createdate = DateTimeField(default=datetime.now)
latitude = TextField()
longitude = TextField()
place_id = TextField()
@classmethod
def get_user(cls,email):
db = get_db()
user = db.get(email,None)
if user is None:
return None
return cls.wrap(user)
def confirm(self):
db = get_db()
self.confirmed = 1
self.store(db)
def calculate_geocode(self):
place_id = self.place_id
data = requests.get(GOOGLE_GEOCODE_URL.format(self.place_id, GOOGLE_API_KEY))
self.latitude = str(data.json()['results'][0]['geometry']['location']['lat'])
self.longitude = str(data.json()['results'][0]['geometry']['location']['lng'])
def update(self, contact=None, password=None, city = None, college=None, address=None, placeid=None):
db = get_db()
if contact and contact != "":
self.contact = contact
if city and city != "":
self.city = city
if college and college != "":
self.college = college
if password and password != "":
self.password = password
if address and address != "" and placeid != "":
self.address = address
self.place_id = placeid
self.calculate_geocode()
self.store(db)
class Item(Document):
doc_type = TextField(default='item')
name = TextField()
item_type = TextField()
description = TextField()
original_price = FloatField()
mrp = FloatField()
date = DateTimeField(default=datetime.now)
user = TextField()
filename = TextField()
sold = IntegerField(default=0)
@classmethod
def all(cls,db):
return cls.view(db,'_design/items/_view/all-items')
def confirmSold(self,id):
db = get_db()
self.sold = 1
self.store(db)
@classmethod
def by_date(cls,limit = None):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byDate',
descending=True,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
if limit is not None:
return items[0:limit]
return items
@classmethod
def by_user(cls,email):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byUser',
key=email,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
@classmethod
def by_item_type(cls,item_type):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byItemType',
key=item_type,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
@classmethod
def by_item_name(cls,name):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byItemName',
key=name,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
@classmethod
def get_item(cls,id):
db = get_db()
item = db.get(id,None)
if item is None:
return None
return cls.wrap(item)
def calculate_distance(self, customer_id):
customer = User.get_user(customer_id)
seller = User.get_user(self.user)
data = requests.get(GOOGLE_DISTANCE_URL.format(customer.latitude,
customer.longitude, seller.latitude,
seller.longitude, GOOGLE_API_KEY))
distance_text = str(data.json()['rows'][0]['elements'][0]['distance']['text'])
distance_value = int(data.json()['rows'][0]['elements'][0]['distance']['value'])
time = str(data.json()['rows'][0]['elements'][0]['duration']['text'])
distance = [distance_text, distance_value, time]
return distance
class Bid(Document):
doc_type = TextField(default='bid')
amount = FloatField()
user = TextField()
item = TextField()
created = DateTimeField()
@classmethod
def get_bid(cls,id):
db = get_db()
bid = db.get(id,None)
if bid is None:
return None
return cls.wrap(bid)
@classmethod
def get_by_item(cls,db,item_id):
# print '_design/bids/_view/get-bids'+item_id
bids = []
bids_obj = cls.view(
db,
'_design/bids/_view/get-bids',
key=item_id,
include_docs=True
)
for row in bids_obj:
bids.append(cls.wrap(row))
return bids
class Purchased(Document):
doc_type = TextField(default='purchase')
item_id = TextField()
buyer = TextField()
seller = TextField()
date = DateTimeField()
@classmethod
def by_user(cls,buyer):
db = get_db()
item_obj = cls.view(
db,
'_design/purchased/_view/get_byUser',
key=buyer,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
def get_db():
if not hasattr(g, 'db'):
server = couchdb.Server("https://"+cloudant_data['username']+':'+cloudant_data['password']
+'@'+cloudant_data['host']+':'+cloudant_data['port'])
try:
g.db = server.create('bazaardata')
except:
g.db = server['bazaardata']
return g.db
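# Illustrative sketch (not part of this file): the queries below rely on CouchDB
# design documents such as '_design/items/_view/byUser', which are not created
# here. A map function for that view might look like the following; the exact
# definitions used in production are an assumption.
_EXAMPLE_ITEMS_DESIGN_DOC = {
    "_id": "_design/items",
    "views": {
        "byUser": {
            "map": "function(doc) { if (doc.doc_type === 'item') { emit(doc.user, null); } }"
        }
    }
}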
# @app.teardown_appcontext
# def close_db(error):
# if hasattr(g, 'db')
@app.before_request
def before_request():
g.user = None
if 'user' in session:
g.user = session['user']
# @app.route('/')
# def Welcome():
# return render_template('signup.html')
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
user = User()
form_data = request.form
print form_data
if form_data.get('name'):
user.name = form_data.get('name',None)
else:
flash('Name field is required', category = "error")
return render_template('signup.html')
if form_data.get('email'):
email = form_data.get('email',None)
if User.get_user(email) is None:
user.email = email
else:
flash("User already exists", category='error')
return render_template('signup.html')
else:
flash('Email field is required', category = "error")
return render_template('signup.html')
if form_data.get('password'):
user.password = form_data.get('password',None)
else:
flash('Password field is required', category = "error")
return render_template('signup.html')
if form_data.get('contact'):
if len(form_data.get('contact')) == 10 and int(form_data.get('contact')) > 0:
user.contact = form_data.get('contact',None)
else:
flash('Invalid Mobile Number', category = "error")
return render_template('signup.html')
else:
flash('Contact field is required', category = "error")
return render_template('signup.html')
if form_data.get('college'):
user.college = form_data.get('college',None)
else:
flash('College field is required', category = "error")
return render_template('signup.html')
if form_data.get('city'):
user.city = form_data.get('city',None)
else:
flash('City field is required', category = "error")
return render_template('signup.html')
if form_data.get('address', None):
user.address = form_data.get('address',None)
else:
flash('Address field is required', category = "error")
return render_template('signup.html')
# print "place ", form_data.get('placeid')
user.place_id = form_data.get('placeid')
# print user
user.confirmed = 0
user.calculate_geocode()
db = get_db()
db[user.email] = user._data
token = generate_confirmation_token(user.email)
confirm_url = url_for('confirm_email', token=token, _external=True)
html = render_template('activate.html', confirm_url=confirm_url)
subject = "Please confirm your email"
#print user.email
send_email(user.email, subject, html)
        flash('A confirmation link has been sent to your email id. Please confirm before logging in.', category = "error")
return redirect(url_for('login'))
return render_template('signup.html')
@app.route('/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('user', None)
email = request.form['email']
# db = get_db()
user = User.get_user(email)
if user is not None:
if not user.confirmed:
flash('Please confirm your account first...!!!', category="error")
elif request.form['password'] == user.password:
session['user'] = user._data
return redirect(url_for('after_login'))
else:
flash('Invalid password', category="error")
else:
flash('Invalid email', category="error")
return render_template('login.html')
# if request.form['password'] == 'password':
# session['user'] = request.form['email']
# return redirect(url_for('after_login'))
return render_template('login.html')
@app.route('/home')
def after_login():
if g.user:
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('home1.html', recent_items = recent_items)
return redirect(url_for('login'))
@app.route('/confirm/<token>')
def confirm_email(token):
try:
# print token
email = confirm_token(token)
# print "email ",email
except:
flash('The confirmation link is invalid or has expired.', category='error')
if email:
user = User.get_user(email)
if user.confirmed:
return 'Account already confirmed. Please login.'
else:
user.confirm()
else:
flash("Unexpected error", category="error")
return redirect(url_for('login'))
@app.route('/posted_items')
def posted_items():
if g.user:
user_items = Item.by_user(g.user['email'])
for i in user_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
#print i.src
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('posted_items.html', items = user_items, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/sell', methods=['GET', 'POST'])
def post_item():
if g.user:
if request.method == 'POST':
item = Item()
form_data = request.form
if request.files.get('photo'):
photo = request.files.get('photo')
else:
flash('Image is required', category = "error")
return render_template('upload1.html')
if form_data.get('item_name'):
item.name = form_data.get('item_name',None)
else:
flash('Item Name is required', category = "error")
return render_template('upload1.html')
if form_data.get('description'):
if len(form_data.get('description')) > 25 and len(form_data.get('description')) < 251:
item.description = form_data.get('description',None)
else:
flash('Description length should be between 25-250 characters.', category = "error")
return render_template('upload1.html')
else:
flash('Description is required', category = "error")
return render_template('upload1.html')
if form_data.get('item_type'):
item.item_type = form_data.get('item_type', None).lower()
else:
flash('Item type is required', category = "error")
return render_template('upload1.html')
if int(form_data.get('original_price')) > 0:
#print "adadad"
item.original_price = form_data.get('original_price',None)
else:
#print "errrrrr"
flash('Invalid price', category = "error")
return render_template('upload1.html')
if int(form_data.get('mrp')) > 0:
#print "adadad"
item.mrp = form_data.get('mrp',None)
else:
#print "errrrrr"
flash('Invalid MRP.', category = "error")
return render_template('upload1.html')
item.user = g.user.get('email', None)
#item.date = datetime.datetime.now
db = get_db()
# try:
# filename = uploaded_photos.save(photo)
# except UploadNotAllowed:
# flash("The upload was not allowed")
# else:
# item.filename = filename
item.id = uuid.uuid4().hex
item.store(db)
db.put_attachment(item,photo,filename=str(item.name)+'.jpg',content_type='image/jpeg')
flash('Your item has been posted.', category = "error")
return redirect(url_for('after_login'))
return render_template('upload1.html')
else:
return redirect(url_for('login'))
@app.route('/view/', methods=['GET', 'POST'])
def view():
if g.user:
if request.method == 'POST':
query_text = request.form.get('search')
query_text = query_text.lower()
item_type_filter = Item.by_item_type(query_text) + Item.by_item_name(query_text)
for i in item_type_filter:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
print item_type_filter
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('search.html', items = item_type_filter, recent_items=recent_items)
else:
db = get_db()
it = Item.all(db)
for i in it:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
#print i.src
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('search.html', items = it, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/view/<id>', methods=['GET', 'POST'])
def item_details(id=None):
if request.method == 'POST':
owner = Item.get_item(id).user
if g.user['email'] == owner:
flash("You cannot place bid for this item.", category='error')
return redirect('/view/'+id)
else:
bid = Bid()
if int(request.form.get('amount')) > 0:
bid.amount = request.form.get('amount')
else:
flash('Invalid Bid', category = "error")
return redirect('/view/'+id)
bid.item = id
bid.user = g.user['email']
db = get_db()
bid.id = uuid.uuid4().hex
bid.store(db)
flash('Your bid has been placed successfully..!!!', category='error')
return redirect('/view/'+id)
else:
if(id):
db = get_db()
item = Item.get_item(id)
items = item._data
src = DATABASE_URL + id + '/' + item.name + '.jpg/'
distance = item.calculate_distance(g.user['email'])
return render_template('item_description.html', item=items, src=src, distance=distance)
@app.route('/view/<id>/bid')
def view_bids(id=None):
if g.user:
db = get_db()
bids = Bid.get_by_item(db,id)
for bid in bids:
x = User.get_user(bid.user)
bid.name = x.name
item = Item.get_item(id)
items = item._data
src = DATABASE_URL + id + '/' + item.name + '.jpg/'
flash('Buyer details have been sent to your emailid.', category='error')
return render_template('view_bids1.html',bids=bids,src=src,item=items)
else:
return redirect(url_for('login'))
@app.route('/view/<id>/bid/<bid_id>/accept', methods=['GET'])
def accept_bid(id=None, bid_id=None):
if g.user:
buyer_email = Bid.get_bid(bid_id).user
seller_email = Item.get_item(id).user
buyer = User.get_user(buyer_email)
seller = User.get_user(seller_email)
db = get_db()
item = Item.get_item(id)
items = item._data
src = DATABASE_URL + id + '/' + item.name + '.jpg/'
html = render_template('seller.html', name=buyer.name, email=buyer_email, contact=buyer.contact,
college=buyer.college, city=buyer.city, address=buyer.address,
item=items, src=src )
subject = "Buyer details"
send_email(seller_email, subject, html)
html1 = render_template('buyer.html', name=seller.name, email=seller_email, contact=seller.contact,
college=seller.college, city=seller.city, address=seller.address,
item=items, src=src)
subject1 = "Seller details"
send_email(buyer_email, subject1, html1)
item.confirmSold(id)
purchase = Purchased()
purchase.buyer = buyer_email
purchase.item_id = id
purchase.seller = seller.name
purchase.date = datetime.now()
db = get_db()
purchase.id = uuid.uuid4().hex
purchase.store(db)
print purchase
flash("Confirmation Email is sent to your email id.", category='error')
return redirect(url_for('view_bids', id=id))
return redirect(url_for('login'))
@app.route('/sold_items')
def sold_items():
if g.user:
user_items = Item.by_user(g.user['email'])
sold_items = []
for i in user_items:
if i.sold == 1:
sold_items.append(i)
for i in sold_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('sold_items.html', sold_items = sold_items, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/purchased_items')
def purchased_items():
if g.user:
purchase = Purchased.by_user(g.user['email'])
print "purchase",purchase
if len(purchase)>0:
purchased_items = []
if len(purchase) > 0:
for i in purchase:
item_id = i.item_id
item = Item.get_item(item_id)
if item:
item.seller = i.seller
item.sold_date = i.date.date()
purchased_items.append(item)
for i in purchased_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
#print purchased_items
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('purchased_items.html', items = purchased_items, recent_items=recent_items)
else:
purchased_items = []
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('purchased_items.html', items = purchased_items, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/views/<filter>', methods=['GET', 'POST'])
def filter_byLocation(filter=None):
if g.user:
db = get_db()
it = Item.all(db)
items = []
for i in it:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
i.distance = i.calculate_distance(g.user['email'])
items.append(i)
items.sort(key = lambda x : x.distance[1])
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('search.html', items = items, recent_items=recent_items)
@app.route('/logout', methods=['GET'])
def logout():
if g.user:
session.pop('user', None)
flash('You have been successfully logged out.', category="error")
return render_template('login.html')
@app.route('/settings', methods=['GET', 'POST'])
def update():
if g.user:
if request.method == "POST":
form_data = request.form
#print form_data.get('placeid') == ""
email = g.user.get('email', None)
user = User.get_user(email)
#call user update function here
user.update(form_data.get('contact', None), form_data.get('password', None),
form_data.get('city', None), form_data.get('college', None),
form_data.get('address', None), form_data.get('placeid', None))
user_data = {}
user_data['name'] = user.name
user_data['email'] = user.email
user_data['city'] = user.city
user_data['college'] = user.college
user_data['address'] = user.address
user_data['contact'] = user.contact
flash("Account details have been updated.", category="error")
return render_template('profile.html', data = user_data)
else:
email = g.user.get('email', None)
user = User.get_user(email)
user_data = {}
user_data['name'] = user.name
user_data['email'] = user.email
user_data['city'] = user.city
user_data['college'] = user.college
user_data['address'] = user.address
user_data['contact'] = user.contact
return render_template('profile.html' , data = user_data)
else:
return redirect(url_for('login'))
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port), debug=True)
| apache-2.0 | -2,056,090,969,468,917,500 | 30.909425 | 125 | 0.535405 | false | 3.8457 | false | false | false |
jtpereyda/boofuzz | boofuzz/pgraph/graph.py | 1 | 18926 | #
# pGRAPH
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import copy
import pydot
from builtins import object
from future.utils import listvalues
class Graph(object):
"""
@todo: Add support for clusters
@todo: Potentially swap node list with a node dictionary for increased performance
"""
id = None
clusters = []
edges = {}
nodes = {}
def __init__(self, graph_id=None):
self.id = graph_id
self.clusters = []
self.edges = {}
self.nodes = {}
def add_cluster(self, cluster):
"""
Add a pgraph cluster to the graph.
@type cluster: pGRAPH Cluster
@param cluster: Cluster to add to graph
"""
self.clusters.append(cluster)
return self
def add_edge(self, graph_edge, prevent_dups=True):
"""
Add a pgraph edge to the graph. Ensures a node exists for both the source and destination of the edge.
@type graph_edge: pGRAPH Edge
@param graph_edge: Edge to add to graph
@type prevent_dups: Boolean
@param prevent_dups: (Optional, Def=True) Flag controlling whether or not the addition of duplicate edges is ok
"""
if prevent_dups:
if graph_edge.id in self.edges:
return self
# ensure the source and destination nodes exist.
if self.find_node("id", graph_edge.src) is not None and self.find_node("id", graph_edge.dst) is not None:
self.edges[graph_edge.id] = graph_edge
return self
def add_graph(self, other_graph):
"""
Alias of graph_cat(). Concatenate the other graph into the current one.
@todo: Add support for clusters
@see: graph_cat()
@type other_graph: pgraph.Graph
@param other_graph: Graph to concatenate into this one.
"""
return self.graph_cat(other_graph)
def add_node(self, node):
"""
Add a pgraph node to the graph. Ensures a node with the same id does not already exist in the graph.
@type node: pGRAPH Node
@param node: Node to add to graph
"""
node.number = len(self.nodes)
if node.id not in self.nodes:
self.nodes[node.id] = node
return self
def del_cluster(self, cluster_id):
"""
Remove a cluster from the graph.
@type cluster_id: Mixed
@param cluster_id: Identifier of cluster to remove from graph
"""
for cluster in self.clusters:
if cluster.id == cluster_id:
self.clusters.remove(cluster)
break
return self
def del_edge(self, graph_id=None, src=None, dst=None):
"""
Remove an edge from the graph. There are two ways to call this routine, with an edge id::
graph.del_edge(id)
or by specifying the edge source and destination::
graph.del_edge(src=source, dst=destination)
@type graph_id: Mixed
@param graph_id: (Optional) Identifier of edge to remove from graph
@type src: Mixed
@param src: (Optional) Source of edge to remove from graph
@type dst: Mixed
@param dst: (Optional) Destination of edge to remove from graph
"""
if not graph_id:
graph_id = (src << 32) + dst # pytype: disable=unsupported-operands
if graph_id in self.edges:
del self.edges[graph_id]
return self
def del_graph(self, other_graph):
"""
Alias of graph_sub(). Remove the elements shared between the current graph and other graph from the current
graph.
@todo: Add support for clusters
@see: graph_sub()
@type other_graph: pgraph.Graph
@param other_graph: Graph to diff/remove against
"""
return self.graph_sub(other_graph)
def del_node(self, node_id):
"""
Remove a node from the graph.
@type node_id: Mixed
@param node_id: Identifier of node to remove from graph
"""
if node_id in self.nodes:
del self.nodes[node_id]
return self
def edges_from(self, edge_id):
"""
Enumerate the edges from the specified node.
@type edge_id: Mixed
@param edge_id: Identifier of node to enumerate edges from
@rtype: list
@return: List of edges from the specified node
"""
return [edge_value for edge_value in listvalues(self.edges) if edge_value.src == edge_id]
def edges_to(self, edge_id):
"""
Enumerate the edges to the specified node.
@type edge_id: Mixed
@param edge_id: Identifier of node to enumerate edges to
@rtype: list
@return: List of edges to the specified node
"""
return [edge_value for edge_value in listvalues(self.edges) if edge_value.dst == edge_id]
def find_cluster(self, attribute, value):
"""
Find and return the cluster with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Cluster, if attribute / value pair is matched. None otherwise.
"""
for cluster in self.clusters:
if hasattr(cluster, attribute):
if getattr(cluster, attribute) == value:
return cluster
return None
def find_cluster_by_node(self, attribute, value):
"""
Find and return the cluster that contains the node with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Cluster, if node with attribute / value pair is matched. None otherwise.
"""
for cluster in self.clusters:
for node in cluster:
if hasattr(node, attribute):
if getattr(node, attribute) == value:
return cluster
return None
def find_edge(self, attribute, value):
"""
Find and return the edge with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Edge, if attribute / value pair is matched. None otherwise.
"""
# if the attribute to search for is the id, simply return the edge from the internal hash.
if attribute == "id" and value in self.edges:
return self.edges[value]
# step through all the edges looking for the given attribute/value pair.
else:
# TODO: Verify that this actually works? Was broken when I got here ;-P
for node_edge in listvalues(self.edges):
if hasattr(node_edge, attribute):
if getattr(node_edge, attribute) == value:
return node_edge
return None
def find_node(self, attribute, value):
"""
Find and return the node with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Node, if attribute / value pair is matched. None otherwise.
"""
# if the attribute to search for is the id, simply return the node from the internal hash.
if attribute == "id" and value in self.nodes:
return self.nodes[value]
# step through all the nodes looking for the given attribute/value pair.
else:
for node in listvalues(self.nodes):
if hasattr(node, attribute):
if getattr(node, attribute) == value:
return node
return None
def graph_cat(self, other_graph):
"""
Concatenate the other graph into the current one.
@todo: Add support for clusters
@type other_graph: pgraph.Graph
@param other_graph: Graph to concatenate into this one.
"""
for other_node in listvalues(other_graph.nodes):
self.add_node(other_node)
for other_edge in listvalues(other_graph.edges):
self.add_edge(other_edge)
return self
def graph_down(self, from_node_id, max_depth=-1):
"""
Create a new graph, looking down, from the specified node id to the specified depth.
@type from_node_id: pgraph.node
@param from_node_id: Node to use as start of down graph
@type max_depth: Integer
@param max_depth: (Optional, Def=-1) Number of levels to include in down graph (-1 for infinite)
@rtype: pgraph.Graph
@return: Down graph around specified node.
"""
down_graph = Graph()
from_node = self.find_node("id", from_node_id)
if not from_node:
print("unable to resolve node {:08x}".format(from_node_id))
raise Exception
levels_to_process = []
current_depth = 1
levels_to_process.append([from_node])
for level in levels_to_process:
next_level = []
# noinspection PyChainedComparisons
if current_depth > max_depth and max_depth != -1:
break
for node in level:
down_graph.add_node(copy.copy(node))
for edge in self.edges_from(node.id):
to_add = self.find_node("id", edge.dst)
if not down_graph.find_node("id", edge.dst):
next_level.append(to_add)
down_graph.add_node(copy.copy(to_add))
down_graph.add_edge(copy.copy(edge))
if next_level:
levels_to_process.append(next_level)
current_depth += 1
return down_graph
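    # Hypothetical usage sketch (added for illustration; the node id is an
    # assumption): build the sub-graph reachable within two levels below a node.
    #
    #   sub = graph.graph_down(0x1000, max_depth=2)
    #   print(len(sub.nodes), len(sub.edges))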
def graph_intersect(self, other_graph):
"""
Remove all elements from the current graph that do not exist in the other graph.
@todo: Add support for clusters
@type other_graph: pgraph.Graph
@param other_graph: Graph to intersect with
"""
for node in listvalues(self.nodes):
if not other_graph.find_node("id", node.id):
self.del_node(node.id)
for edge in listvalues(self.edges):
if not other_graph.find_edge("id", edge.id):
self.del_edge(edge.id)
return self
def graph_proximity(self, center_node_id, max_depth_up=2, max_depth_down=2):
"""
Create a proximity graph centered around the specified node.
@type center_node_id: pgraph.node
@param center_node_id: Node to use as center of proximity graph
@type max_depth_up: Integer
@param max_depth_up: (Optional, Def=2) Number of upward levels to include in proximity graph
@type max_depth_down: Integer
@param max_depth_down: (Optional, Def=2) Number of downward levels to include in proximity graph
@rtype: pgraph.Graph
@return: Proximity graph around specified node.
"""
prox_graph = self.graph_down(center_node_id, max_depth_down)
prox_graph.add_graph(self.graph_up(center_node_id, max_depth_up))
return prox_graph
def graph_sub(self, other_graph):
"""
Remove the elements shared between the current graph and other graph from the current
graph.
@todo: Add support for clusters
@type other_graph: pgraph.Graph
@param other_graph: Graph to diff/remove against
"""
for other_node in listvalues(other_graph.nodes):
self.del_node(other_node.id)
for other_edge in listvalues(other_graph.edges):
self.del_edge(None, other_edge.src, other_edge.dst)
return self
def graph_up(self, from_node_id, max_depth=-1):
"""
Create a new graph, looking up, from the specified node id to the specified depth.
@type from_node_id: pgraph.node
@param from_node_id: Node to use as start of up graph
@type max_depth: Integer
@param max_depth: (Optional, Def=-1) Number of levels to include in up graph (-1 for infinite)
@rtype: pgraph.Graph
@return: Up graph to the specified node.
"""
up_graph = Graph()
from_node = self.find_node("id", from_node_id)
levels_to_process = []
current_depth = 1
levels_to_process.append([from_node])
for level in levels_to_process:
next_level = []
# noinspection PyChainedComparisons
if current_depth > max_depth and max_depth != -1:
break
for node in level:
up_graph.add_node(copy.copy(node))
for edge in self.edges_to(node.id):
to_add = self.find_node("id", edge.src)
if not up_graph.find_node("id", edge.src):
next_level.append(to_add)
up_graph.add_node(copy.copy(to_add))
up_graph.add_edge(copy.copy(edge))
if next_level:
levels_to_process.append(next_level)
current_depth += 1
return up_graph
def render_graph_gml(self):
"""
Render the GML graph description.
:returns: GML graph description.
:rtype: str
"""
gml = 'Creator "pGRAPH - Pedram Amini <[email protected]>"\n'
gml += "directed 1\n"
# open the graph tag.
gml += "graph [\n"
# add the nodes to the GML definition.
for node in listvalues(self.nodes):
gml += node.render_node_gml()
# add the edges to the GML definition.
for edge in listvalues(self.edges):
gml += edge.render_edge_gml(self)
# close the graph tag.
gml += "\n]\n"
"""
TODO: Complete cluster rendering
# if clusters exist.
if len(self.clusters):
# open the rootcluster tag.
gml += 'rootcluster [\n'
# add the clusters to the GML definition.
for cluster in self.clusters:
gml += cluster.render()
# add the clusterless nodes to the GML definition.
for node in self.nodes:
if not self.find_cluster_by_node("id", node.id):
gml += ' vertex "%d"\n' % node.id
# close the rootcluster tag.
gml += ']\n'
"""
return gml
def render_graph_graphviz(self):
"""
Render the graphviz graph structure.
Example to create a png:
.. code-block::
with open('somefile.png', 'wb') as file:
file.write(session.render_graph_graphviz().create_png())
:returns: Pydot object representing entire graph
:rtype: pydot.Dot
"""
dot_graph = pydot.Dot()
for node in listvalues(self.nodes):
dot_graph.add_node(node.render_node_graphviz())
for edge in listvalues(self.edges):
dot_graph.add_edge(edge.render_edge_graphviz())
return dot_graph
def render_graph_udraw(self):
"""
Render the uDraw graph description.
:returns: uDraw graph description.
:rtype: str
"""
udraw = "["
# render each of the nodes in the graph.
# the individual nodes will handle their own edge rendering.
for node in listvalues(self.nodes):
udraw += node.render_node_udraw(self)
udraw += ","
        # trim the extraneous comma and close the graph.
udraw = udraw[0:-1] + "\n]"
return udraw
def render_graph_udraw_update(self):
"""
Render the uDraw graph update description.
:returns: uDraw graph description.
:rtype: str
"""
udraw = "["
for node in listvalues(self.nodes):
udraw += node.render_node_udraw_update()
udraw += ","
for edge in listvalues(self.edges):
udraw += edge.render_edge_udraw_update()
udraw += ","
        # trim the extraneous comma and close the graph.
udraw = udraw[0:-1] + "]"
return udraw
def update_node_id(self, current_id, new_id):
"""
Simply updating the id attribute of a node will sever the edges to / from the given node. This routine will
correctly update the edges as well.
@type current_id: long
@param current_id: Current ID of node whose ID we want to update
@type new_id: long
@param new_id: New ID to update to.
"""
if current_id not in self.nodes:
return
# update the node.
node = self.nodes[current_id]
del self.nodes[current_id]
node.id = new_id
self.nodes[node.id] = node
# update the edges.
for edge in [edge for edge in listvalues(self.edges) if current_id in (edge.src, edge.dst)]:
del self.edges[edge.id]
if edge.src == current_id:
edge.src = new_id
if edge.dst == current_id:
edge.dst = new_id
edge.id = (edge.src << 32) + edge.dst
self.edges[edge.id] = edge
def sorted_nodes(self):
"""
Return a list of the nodes within the graph, sorted by id.
@rtype: List
@return: List of nodes, sorted by id.
"""
node_keys = list(self.nodes)
node_keys.sort()
return [self.nodes[key] for key in node_keys]
| gpl-2.0 | -7,393,530,051,731,024,000 | 29.427653 | 119 | 0.57524 | false | 4.155907 | false | false | false |
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam | PyFoam/Basics/DataStructures.py | 1 | 16332 | """Data structures in Foam-Files that can't be directly represented by Python-Structures"""
from __future__ import division
from copy import deepcopy
from collections import OrderedDict
import math
import re
# import FoamFileGenerator in the end to avoid circular dependencies
from PyFoam.ThirdParty.six import integer_types,PY3,string_types
if PY3:
def cmp(a,b):
if a<b:
return -1
elif a==b:
return 0
else:
return 1
class FoamDataType(object):
def __repr__(self):
return "'"+str(self)+"'"
def __eq__(self,other):
"""Implementation to make __cmp__ work again in Python3
Implementing this method means that these objects are not hashable.
But that is OK
"""
return self.__cmp__(other)==0
def __lt__(self,other):
"Implementation to make __cmp__ work again in Python3"
return self.__cmp__(other)<0
def __ne__(self,other):
return self.__cmp__(other)!=0
def __gt__(self,other):
return self.__cmp__(other)>0
def __ge__(self,other):
return self.__cmp__(other)>=0
def __le__(self,other):
return self.__cmp__(other)<=0
class Field(FoamDataType):
def __init__(self,val,name=None):
self.val=val
self.name=name
        if type(val) in [list, UnparsedList, BinaryList]:
self.uniform=False
elif self.name==None:
self.uniform=True
else:
raise TypeError("Type",type(val),"of value",val,"can not be used to determine uniformity")
def __str__(self):
result=""
if self.uniform:
result+="uniform "
else:
result+="nonuniform "
if self.name:
result+=self.name+" "
result+=str(
PyFoam.Basics.FoamFileGenerator.FoamFileGenerator(
self.val,
longListThreshold=-1,
useFixedType=False
))
return result
def __cmp__(self,other):
if other==None or type(other)!=Field:
return 1
if self.uniform!=other.uniform:
return cmp(self.uniform,other.uniform)
elif self.name!=other.name:
return cmp(self.name,other.name)
else:
return cmp(self.val,other.val)
def __getitem__(self,key):
assert(not self.uniform)
return self.val[key]
def __setitem__(self,key,value):
assert(not self.uniform)
self.val[key]=value
def isUniform(self):
return self.uniform
def isBinary(self):
return type(self.val)==BinaryList
def binaryString(self):
return "nonuniform "+self.name+" <BINARY DATA>"
def value(self):
return self.val
def setUniform(self,data):
self.val=data
self.uniform=True
self.name=None
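# Illustrative sketch (added; not part of the original PyFoam module): a uniform
# and a nonuniform field. The 'name' argument is required for nonuniform values.
#
#   Field(0.5)                              # str() -> "uniform 0.5"
#   Field([0.1, 0.2, 0.3], "List<scalar>")  # str() -> "nonuniform List<scalar> ..."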
class Dimension(FoamDataType):
def __init__(self,*dims):
assert(len(dims)==7)
self.dims=list(dims)
def __str__(self):
result="[ "
for v in self.dims:
result+=str(v)+" "
result+="]"
return result
def __cmp__(self,other):
if other==None:
return 1
return cmp(self.dims,other.dims)
def __getitem__(self,key):
return self.dims[key]
def __setitem__(self,key,value):
self.dims[key]=value
class FixedLength(FoamDataType):
def __init__(self,vals):
self.vals=vals[:]
def __str__(self):
return "("+" ".join(["%g"%v for v in self.vals])+")"
def __cmp__(self,other):
if other==None or not issubclass(type(other),FixedLength):
return 1
return cmp(self.vals,other.vals)
def __getitem__(self,key):
return self.vals[key]
def __setitem__(self,key,value):
self.vals[key]=value
def __len__(self):
return len(self.vals)
class Vector(FixedLength):
def __init__(self,x,y,z):
FixedLength.__init__(self,[x,y,z])
def __add__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]+y[0],x[1]+y[1],x[2]+y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]+y,x[1]+y,x[2]+y)
else:
return NotImplemented
def __radd__(self,y):
x=self
if type(y) in integer_types+(float,):
return Vector(x[0]+y,x[1]+y,x[2]+y)
else:
return NotImplemented
def __sub__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]-y[0],x[1]-y[1],x[2]-y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]-y,x[1]-y,x[2]-y)
else:
return NotImplemented
def __rsub__(self,y):
x=self
if type(y) in integer_types+(float,):
return Vector(y-x[0],y-x[1],y-x[2])
else:
return NotImplemented
def __mul__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]*y[0],x[1]*y[1],x[2]*y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]*y,x[1]*y,x[2]*y)
else:
return NotImplemented
def __rmul__(self,y):
x=self
if type(y) in integer_types+(float,):
return Vector(y*x[0],y*x[1],y*x[2])
else:
return NotImplemented
def __div__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]/y[0],x[1]/y[1],x[2]/y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]/y,x[1]/y,x[2]/y)
else:
return NotImplemented
def __truediv__(self,y):
return self.__div__(y)
def __xor__(self,y):
x=self
if type(y)==Vector:
return Vector(x[1]*y[2]-x[2]*y[1],
x[2]*y[0]-x[0]*y[2],
x[0]*y[1]-x[1]*y[0])
else:
return NotImplemented
def __abs__(self):
x=self
return math.sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2])
def __neg__(self):
x=self
return Vector(-x[0],-x[1],-x[2])
def __pos__(self):
x=self
return Vector( x[0], x[1], x[2])
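# Illustrative sketch (added for clarity): Vector supports elementwise arithmetic,
# the cross product via ^ and the Euclidean norm via abs().
#
#   a = Vector(1, 0, 0)
#   b = Vector(0, 1, 0)
#   a + b        # Vector(1, 1, 0)
#   a ^ b        # Vector(0, 0, 1), the cross product
#   abs(a - b)   # sqrt(2)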
class Tensor(FixedLength):
def __init__(self,v1,v2,v3,v4,v5,v6,v7,v8,v9):
FixedLength.__init__(self,[v1,v2,v3,v4,v5,v6,v7,v8,v9])
class SymmTensor(FixedLength):
def __init__(self,v1,v2,v3,v4,v5,v6):
FixedLength.__init__(self,[v1,v2,v3,v4,v5,v6])
class BoolProxy(object):
"""Wraps a boolean parsed from a file. Optionally stores a textual
representation
"""
TrueStrings=["on",
"yes",
"true",
# "y" # this breaks parsing certain files
]
FalseStrings=[
"off",
"no",
"false",
# "n", # this breaks parsing certain files
"none",
"invalid"
]
def __init__(self,val=None,textual=None):
if val==None and textual==None:
raise TypeError("'BoolProxy' initialized without values")
elif val==None:
if textual in BoolProxy.TrueStrings:
self.val=True
elif textual in BoolProxy.FalseStrings:
self.val=False
else:
                raise TypeError(str(textual)+" not in "+str(BoolProxy.TrueStrings)
                                +" or "+str(BoolProxy.FalseStrings))
else:
if val not in [True,False]:
raise TypeError(str(val)+" is not a boolean")
self.val=val
self.textual=textual
if self.textual:
if self.val:
if self.textual not in BoolProxy.TrueStrings:
raise TypeError(self.textual+" not in "
+str(BoolProxy.TrueStrings))
else:
if self.textual not in BoolProxy.FalseStrings:
raise TypeError(self.textual+" not in "
+str(BoolProxy.FalseStrings))
def __nonzero__(self):
return self.val
# for Python 3
def __bool__(self):
return self.val
def __str__(self):
if self.textual==None:
if self.val:
return "yes"
else:
return "no"
else:
return self.textual
def __repr__(self):
return self.__str__()
def __eq__(self,o):
if type(o) in [bool,BoolProxy]:
return self.val==o
elif isinstance(o,string_types):
if self.textual==o:
return True
else:
try:
return self.val==BoolProxy(textual=o)
except TypeError:
return False
else:
# raise TypeError("Can't compare BoolProxy with "+str(type(o)))
return self.val==o
def __ne__(self,o):
if type(o) in [bool,BoolProxy]:
return self.val!=o
elif isinstance(o,string_types):
if self.textual!=o:
return True
else:
try:
return self.val!=BoolProxy(textual=o)
except TypeError:
return False
else:
raise TypeError("Can't compare BoolProxy with "+str(type(o)))
class DictRedirection(object):
"""This class is in charge of handling redirections to other directories"""
def __init__(self,fullCopy,reference,name):
self._fullCopy=fullCopy
self._reference=reference
self._name=name
def useAsRedirect(self):
self._fullCopy=None
def getContent(self):
result=self._fullCopy
self._fullCopy=None
return result
def __call__(self):
return self._reference
def __str__(self):
return "$"+self._name
def __float__(self):
return float(self._reference)
def keys(self):
if self._fullCopy:
return self._fullCopy.keys()
else:
return self._reference.keys()
class DictProxy(dict):
"""A class that acts like a dictionary, but preserves the order
of the entries. Used to beautify the output"""
def __init__(self):
dict.__init__(self)
self._order=[]
self._decoration={}
self._regex=[]
self._redirects=[]
def isRegexp(self,key):
if type(key)==str:
if key[0]=='"' and key[-1]=='"':
return True
return False
def __setitem__(self,key,value):
if self.isRegexp(key):
exp=re.compile(key[1:-1])
self._regex=[(key,exp,value)]+self._regex
dict.__setitem__(self,key,value)
else:
dict.__setitem__(self,key,value)
if key not in self._order:
self._order.append(key)
def __getitem__(self,key):
try:
return dict.__getitem__(self,key)
except KeyError:
for k,e,v in self._regex:
if e.match(key):
return v
for r in self._redirects:
try:
return r()[key]
except KeyError:
pass
raise KeyError(key)
def __delitem__(self,key):
dict.__delitem__(self,key)
self._order.remove(key)
if key in self._decoration:
del self._decoration[key]
def __deepcopy__(self,memo):
new=DictProxy()
for k in self._order:
if type(k)==DictRedirection:
new.addRedirection(k)
else:
try:
new[k]=deepcopy(self[k],memo)
except KeyError:
new[k]=deepcopy(self.getRegexpValue(k),memo)
return new
def __contains__(self,key):
if dict.__contains__(self,key):
return True
else:
for k,e,v in self._regex:
if e.match(key):
return True
for r in self._redirects:
if key in r():
return True
return False
def __enforceString(self,v,toString):
if not isinstance(v,string_types) and toString:
r=str(v)
if isinstance(v,(list,dict)):
r='"'+r+'"'
return r
else:
return v
def update(self,other=None,toString=False,**kwargs):
"""Emulate the regular update of dict"""
if other:
if hasattr(other,"keys"):
for k in other.keys():
self[k]=self.__enforceString(other[k],toString)
else:
for k,v in other:
self[k]=self.__enforceString(v,toString)
for k in kwargs:
self[k]=self.__enforceString(kwargs[k],toString)
def keys(self):
result=[x for x in self._order if x not in self._redirects]
for r in self._redirects:
for k in r.keys():
if not k in result:
result.append(k)
return result
def __iter__(self):
s=set()
for k in self._order:
if k not in self._redirects:
s.add(k)
yield k
for r in self._redirects:
for k in r.keys():
if not k in s:
s.add(k)
yield k
def __str__(self):
first=True
result="{"
for k in self.keys():
v=self[k]
if first:
first=False
else:
result+=", "
result+="%s: %s" % (repr(k),repr(v))
result+="}"
return result
def iteritems(self):
lst=[]
for k in self:
lst.append((k,self[k]))
return lst
# needed for python 3. Should be a generator, but ...
def items(self):
return self.iteritems()
def addDecoration(self,key,text):
if key in self:
if key not in self._decoration:
self._decoration[key]=""
self._decoration[key]+=text
def getDecoration(self,key):
if key in self._decoration:
return " \t"+self._decoration[key]
else:
return ""
def getRegexpValue(self,key):
for k,e,v in self._regex:
if k==key:
return v
raise KeyError(key)
def addRedirection(self,redir):
self._order.append(redir)
redir.useAsRedirect()
self._redirects.append(redir)
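# Illustrative sketch (added; not part of the original module): DictProxy keeps
# insertion order and treats keys written with surrounding quotes as regexes.
#
#   d = DictProxy()
#   d['"(p|P)res.*"'] = 0     # regular-expression entry
#   d['T'] = 300
#   d['pressure']             # -> 0, matched through the regex key
#   list(d.keys())            # -> ['"(p|P)res.*"', 'T'] (insertion order preserved)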
class TupleProxy(list):
"""Enables Tuples to be manipulated"""
def __init__(self,tup=()):
list.__init__(self,tup)
class Unparsed(object):
"""A class that encapsulates an unparsed string"""
def __init__(self,data):
self.data=data
def __str__(self):
return self.data
def __hash__(self):
return hash(self.data)
def __lt__(self,other):
return self.data<other.data
class BinaryBlob(Unparsed):
"""Represents a part of the file with binary data in it"""
def __init__(self,data):
Unparsed.__init__(self,data)
class Codestream(str):
"""A class that encapsulates an codestream string"""
def __str__(self):
return "#{" + str.__str__(self) + "#}"
class UnparsedList(object):
"""A class that encapsulates a list that was not parsed for
performance reasons"""
def __init__(self,lngth,data):
self.data=data
self.length=lngth
def __len__(self):
return self.length
def __cmp__(self,other):
return cmp(self.data,other.data)
def __eq__(self,other):
return self.data==other.data
def __lt__(self,other):
return self.data<other.data
class BinaryList(UnparsedList):
"""A class that represents a list that is saved as binary data"""
def __init__(self,lngth,data):
UnparsedList.__init__(self,lngth,data)
def makePrimitiveString(val):
"""Make strings of types that might get written to a directory"""
if isinstance(val,(Dimension,FixedLength,BoolProxy)):
return str(val)
else:
return val
# Moved to the end to avoid circular dependencies
import PyFoam.Basics.FoamFileGenerator
# Should work with Python3 and Python2
| gpl-2.0 | -4,579,020,472,537,835,500 | 25.995041 | 102 | 0.515246 | false | 3.879335 | false | false | false |
jtraver/dev | python3/selenium/apihelper.py | 1 | 1826 | #!/usr/bin/env python3
#!/usr/bin/python
"""Cheap and simple API helper
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim ([email protected])"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
# While this is a good example script to teach about introspection,
# in real life it has been superceded by PyDoc, which is part of the
# standard library in Python 2.1 and later.
#
# Your IDE may already import the "help" function from pydoc
# automatically on startup; if not, do this:
#
# >>> from pydoc import help
#
# The help function in this module takes the object itself to get
# help on, but PyDoc can also take a string, like this:
#
# >>> help("string") # gets help on the string module
# >>> help("apihelper.help") # gets help on the function below
# >>> help() # enters an interactive help mode
#
# PyDoc can also act as an HTTP server to dynamically produce
# HTML-formatted documentation of any module in your path.
# That's wicked cool. Read more about PyDoc here:
# http://www.onlamp.com/pub/a/python/2001/04/18/pydoc.html
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [e for e in dir(object) if callable(getattr(object, e))]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["\n%s\n\t%s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
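# Hypothetical usage (added for illustration; mirrors the PyDoc notes above):
#
#   >>> import os
#   >>> info(os.path, spacing=20)   # list os.path's callables with their doc strings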
if __name__ == "__main__":
print(help.__doc__)
| mit | 5,549,527,252,694,956,000 | 34.803922 | 79 | 0.664294 | false | 3.445283 | false | false | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/domain_category.py | 1 | 3952 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.resources',
marshal='google.ads.googleads.v7',
manifest={
'DomainCategory',
},
)
class DomainCategory(proto.Message):
r"""A category generated automatically by crawling a domain. If a
campaign uses the DynamicSearchAdsSetting, then domain
categories will be generated for the domain. The categories can
be targeted using WebpageConditionInfo. See:
https://support.google.com/google-ads/answer/2471185
Attributes:
resource_name (str):
Output only. The resource name of the domain category.
Domain category resource names have the form:
``customers/{customer_id}/domainCategories/{campaign_id}~{category_base64}~{language_code}``
campaign (str):
Output only. The campaign this category is
recommended for.
category (str):
Output only. Recommended category for the
website domain. e.g. if you have a website about
electronics, the categories could be "cameras",
"televisions", etc.
language_code (str):
Output only. The language code specifying the
language of the website. e.g. "en" for English.
The language can be specified in the
DynamicSearchAdsSetting required for dynamic
search ads. This is the language of the pages
from your website that you want Google Ads to
find, create ads for, and match searches with.
domain (str):
Output only. The domain for the website. The
domain can be specified in the
DynamicSearchAdsSetting required for dynamic
search ads.
coverage_fraction (float):
Output only. Fraction of pages on your site
that this category matches.
category_rank (int):
Output only. The position of this category in
the set of categories. Lower numbers indicate a
better match for the domain. null indicates not
recommended.
has_children (bool):
Output only. Indicates whether this category
has sub-categories.
recommended_cpc_bid_micros (int):
Output only. The recommended cost per click
for the category.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
campaign = proto.Field(
proto.STRING,
number=10,
optional=True,
)
category = proto.Field(
proto.STRING,
number=11,
optional=True,
)
language_code = proto.Field(
proto.STRING,
number=12,
optional=True,
)
domain = proto.Field(
proto.STRING,
number=13,
optional=True,
)
coverage_fraction = proto.Field(
proto.DOUBLE,
number=14,
optional=True,
)
category_rank = proto.Field(
proto.INT64,
number=15,
optional=True,
)
has_children = proto.Field(
proto.BOOL,
number=16,
optional=True,
)
recommended_cpc_bid_micros = proto.Field(
proto.INT64,
number=17,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 2,978,796,001,538,029,000 | 30.870968 | 104 | 0.620445 | false | 4.435466 | false | false | false |
spawnedc/MeCanBlog | dbindexer/lookups.py | 1 | 8808 | from django.db import models
from djangotoolbox.fields import ListField
from copy import deepcopy
import re
regex = type(re.compile(''))
class LookupDoesNotExist(Exception):
pass
class LookupBase(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if not isinstance(new_cls.lookup_types, (list, tuple)):
new_cls.lookup_types = (new_cls.lookup_types, )
return new_cls
class ExtraFieldLookup(object):
'''Default is to behave like an exact filter on an ExtraField.'''
__metaclass__ = LookupBase
lookup_types = 'exact'
def __init__(self, model=None, field_name=None, lookup_def=None,
new_lookup='exact', field_to_add=models.CharField(
max_length=500, editable=False, null=True)):
self.field_to_add = field_to_add
self.new_lookup = new_lookup
self.contribute(model, field_name, lookup_def)
def contribute(self, model, field_name, lookup_def):
self.model = model
self.field_name = field_name
self.lookup_def = lookup_def
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name, self.lookup_types[0])
def convert_lookup(self, value, lookup_type):
        # TODO: can value be a list or tuple? (for the 'in' lookup, yes)
if isinstance(value, (tuple, list)):
value = [self._convert_lookup(val, lookup_type)[1] for val in value]
else:
_, value = self._convert_lookup(value, lookup_type)
return self.new_lookup, value
def _convert_lookup(self, value, lookup_type):
return lookup_type, value
def convert_value(self, value):
if value is not None:
if isinstance(value, (tuple, list)):
value = [self._convert_value(val) for val in value]
else:
value = self._convert_value(value)
return value
def _convert_value(self, value):
return value
def matches_filter(self, model, field_name, lookup_type, value):
return self.model == model and lookup_type in self.lookup_types \
and field_name == self.field_name
@classmethod
def matches_lookup_def(cls, lookup_def):
if lookup_def in cls.lookup_types:
return True
return False
def get_field_to_add(self, field_to_index):
field_to_add = deepcopy(self.field_to_add)
if isinstance(field_to_index, ListField):
field_to_add = ListField(field_to_add, editable=False, null=True)
return field_to_add
class DateLookup(ExtraFieldLookup):
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'exact',
'field_to_add': models.IntegerField(editable=False, null=True)}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value
class Day(DateLookup):
lookup_types = 'day'
def _convert_value(self, value):
return value.day
class Month(DateLookup):
lookup_types = 'month'
def _convert_value(self, value):
return value.month
class Year(DateLookup):
lookup_types = 'year'
def _convert_value(self, value):
return value.year
class Weekday(DateLookup):
lookup_types = 'week_day'
def _convert_value(self, value):
return value.isoweekday()
class Contains(ExtraFieldLookup):
lookup_types = 'contains'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith',
'field_to_add': ListField(models.CharField(500),
editable=False, null=True)
}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def get_field_to_add(self, field_to_index):
# always return a ListField of CharFields even in the case of
# field_to_index being a ListField itself!
return deepcopy(self.field_to_add)
def convert_value(self, value):
new_value = []
if isinstance(value, (tuple, list)):
for val in value:
new_value.extend(self.contains_indexer(val))
else:
new_value = self.contains_indexer(value)
return new_value
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value
def contains_indexer(self, value):
# In indexing mode we add all postfixes ('o', 'lo', ..., 'hello')
result = []
if value:
result.extend([value[count:] for count in range(len(value))])
return result
class Icontains(Contains):
lookup_types = 'icontains'
def convert_value(self, value):
return [val.lower() for val in Contains.convert_value(self, value)]
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
class Iexact(ExtraFieldLookup):
lookup_types = 'iexact'
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
def _convert_value(self, value):
return value.lower()
class Istartswith(ExtraFieldLookup):
lookup_types = 'istartswith'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith'}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
def _convert_value(self, value):
return value.lower()
class Endswith(ExtraFieldLookup):
lookup_types = 'endswith'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith'}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value[::-1]
def _convert_value(self, value):
return value[::-1]
class Iendswith(Endswith):
lookup_types = 'iendswith'
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value[::-1].lower()
def _convert_value(self, value):
return value[::-1].lower()
class RegexLookup(ExtraFieldLookup):
lookup_types = ('regex', 'iregex')
def __init__(self, *args, **kwargs):
defaults = {'field_to_add': models.NullBooleanField(editable=False,
null=True)
}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def contribute(self, model, field_name, lookup_def):
ExtraFieldLookup.contribute(self, model, field_name, lookup_def)
if isinstance(lookup_def, regex):
self.lookup_def = re.compile(lookup_def.pattern, re.S | re.U |
(lookup_def.flags & re.I))
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name,
self.lookup_def.pattern.encode('hex'))
def is_icase(self):
return self.lookup_def.flags & re.I
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, True
def _convert_value(self, value):
if self.lookup_def.match(value):
return True
return False
def matches_filter(self, model, field_name, lookup_type, value):
return self.model == model and lookup_type == \
'%sregex' % ('i' if self.is_icase() else '') and \
value == self.lookup_def.pattern and field_name == self.field_name
@classmethod
def matches_lookup_def(cls, lookup_def):
if isinstance(lookup_def, regex):
return True
return False
class StandardLookup(ExtraFieldLookup):
''' Creates a copy of the field_to_index in order to allow querying for
standard lookup_types on a JOINed property. '''
# TODO: database backend can specify standardLookups
lookup_types = ('exact', 'gt', 'gte', 'lt', 'lte', 'in', 'range', 'isnull')
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name, 'standard')
def convert_lookup(self, value, lookup_type):
return lookup_type, value
def get_field_to_add(self, field_to_index):
field_to_add = deepcopy(field_to_index)
if isinstance(field_to_add, (models.DateTimeField,
models.DateField, models.TimeField)):
field_to_add.auto_now_add = field_to_add.auto_now = False
return field_to_add
| bsd-3-clause | 6,995,313,191,681,633,000 | 32.618321 | 83 | 0.592643 | false | 3.878468 | false | false | false |
googleads/google-ads-python | google/ads/googleads/v8/services/types/geo_target_constant_service.py | 1 | 5566 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.resources.types import (
geo_target_constant as gagr_geo_target_constant,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={
"GetGeoTargetConstantRequest",
"SuggestGeoTargetConstantsRequest",
"SuggestGeoTargetConstantsResponse",
"GeoTargetConstantSuggestion",
},
)
class GetGeoTargetConstantRequest(proto.Message):
r"""Request message for
[GeoTargetConstantService.GetGeoTargetConstant][google.ads.googleads.v8.services.GeoTargetConstantService.GetGeoTargetConstant].
Attributes:
resource_name (str):
Required. The resource name of the geo target
constant to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
class SuggestGeoTargetConstantsRequest(proto.Message):
r"""Request message for
[GeoTargetConstantService.SuggestGeoTargetConstants][google.ads.googleads.v8.services.GeoTargetConstantService.SuggestGeoTargetConstants].
Attributes:
locale (str):
If possible, returned geo targets are
translated using this locale. If not, en is used
by default. This is also used as a hint for
returned geo targets.
country_code (str):
Returned geo targets are restricted to this
country code.
location_names (google.ads.googleads.v8.services.types.SuggestGeoTargetConstantsRequest.LocationNames):
The location names to search by. At most 25
names can be set.
geo_targets (google.ads.googleads.v8.services.types.SuggestGeoTargetConstantsRequest.GeoTargets):
The geo target constant resource names to
filter by.
"""
class LocationNames(proto.Message):
r"""A list of location names.
Attributes:
names (Sequence[str]):
A list of location names.
"""
names = proto.RepeatedField(proto.STRING, number=2,)
class GeoTargets(proto.Message):
r"""A list of geo target constant resource names.
Attributes:
geo_target_constants (Sequence[str]):
A list of geo target constant resource names.
"""
geo_target_constants = proto.RepeatedField(proto.STRING, number=2,)
locale = proto.Field(proto.STRING, number=6, optional=True,)
country_code = proto.Field(proto.STRING, number=7, optional=True,)
location_names = proto.Field(
proto.MESSAGE, number=1, oneof="query", message=LocationNames,
)
geo_targets = proto.Field(
proto.MESSAGE, number=2, oneof="query", message=GeoTargets,
)
class SuggestGeoTargetConstantsResponse(proto.Message):
r"""Response message for
[GeoTargetConstantService.SuggestGeoTargetConstants][google.ads.googleads.v8.services.GeoTargetConstantService.SuggestGeoTargetConstants].
Attributes:
geo_target_constant_suggestions (Sequence[google.ads.googleads.v8.services.types.GeoTargetConstantSuggestion]):
Geo target constant suggestions.
"""
geo_target_constant_suggestions = proto.RepeatedField(
proto.MESSAGE, number=1, message="GeoTargetConstantSuggestion",
)
class GeoTargetConstantSuggestion(proto.Message):
r"""A geo target constant suggestion.
Attributes:
locale (str):
The language this GeoTargetConstantSuggestion
is currently translated to. It affects the name
of geo target fields. For example, if locale=en,
then name=Spain. If locale=es, then name=España.
The default locale will be returned if no
translation exists for the locale in the
request.
reach (int):
Approximate user population that will be
targeted, rounded to the nearest 100.
search_term (str):
If the request searched by location name,
this is the location name that matched the geo
target.
geo_target_constant (google.ads.googleads.v8.resources.types.GeoTargetConstant):
The GeoTargetConstant result.
geo_target_constant_parents (Sequence[google.ads.googleads.v8.resources.types.GeoTargetConstant]):
The list of parents of the geo target
constant.
"""
locale = proto.Field(proto.STRING, number=6, optional=True,)
reach = proto.Field(proto.INT64, number=7, optional=True,)
search_term = proto.Field(proto.STRING, number=8, optional=True,)
geo_target_constant = proto.Field(
proto.MESSAGE,
number=4,
message=gagr_geo_target_constant.GeoTargetConstant,
)
geo_target_constant_parents = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=gagr_geo_target_constant.GeoTargetConstant,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -4,300,285,635,369,971,000 | 35.854305 | 142 | 0.679245 | false | 4.231939 | false | false | false |
novirael/os-simulation | memory/simulation.py | 1 | 1520 | from copy import copy
from random import randint
from memory import (
FirstInFirstOutAlgorithm,
TheOptimalAlgorithm,
LastRecentlyUsedAlgorithm,
ApproximalLastRecentlyUsedAlgorithm,
RandomAlgorithm
)
PAGE_SIZE = 100
FRAMES = 10
NUM_REQUESTS = 1000
def test(page_size, frames_size, num_requests, draw=False):
summary = {}
query = [randint(1, page_size+1) for _ in range(num_requests)]
algorithms = [
FirstInFirstOutAlgorithm(copy(query), frames_size),
TheOptimalAlgorithm(copy(query), frames_size),
LastRecentlyUsedAlgorithm(copy(query), frames_size),
ApproximalLastRecentlyUsedAlgorithm(copy(query), frames_size),
RandomAlgorithm(copy(query), frames_size)
]
for alg in algorithms:
alg.execute()
if draw:
print 'Page faults for {title}: {faults}/{requests}'.format(
title=alg.title,
faults=alg.page_faults,
requests=num_requests
)
summary[alg.title] = alg.page_faults
return summary
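# Illustrative usage (added; the keys below are assumptions - the real keys are each
# algorithm object's `title` attribute, the values its page-fault count):
#
#   faults = test(PAGE_SIZE, FRAMES, NUM_REQUESTS, draw=True)
#   # -> {'FIFO': ..., 'OPT': ..., 'LRU': ..., 'ALRU': ..., 'RAND': ...}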
def statistic(frames, times=50):
stat = {}
for i in range(times):
results = test(PAGE_SIZE, frames, NUM_REQUESTS)
if not stat:
stat = copy(results)
else:
for alg, result in results.iteritems():
stat[alg] += result
print stat
if __name__ == "__main__":
# test(PAGE_SIZE, FRAMES, NUM_REQUESTS, draw=True)
for frames in [10, 20, 30, 40]:
statistic(frames)
| mit | -3,993,133,811,072,129,000 | 24.333333 | 72 | 0.617105 | false | 3.828715 | false | false | false |
mattsep/TDSE | src/animate.py | 1 | 1444 | import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# animation of the probability density of the wavefunction over the course
# of time
def probabilityDensity(x, t, V, psi):
# convert to the probability density
Nt = len(t)
rho = sp.real(sp.conjugate(psi)*psi)
# set the first frame properties and grab the line handles
fig, ax = plt.subplots()
line1, line2, line3, line4 = ax.plot(x, rho[:,1], 'k',
x, sp.real(psi[:,1]), 'b:',
x, sp.imag(psi[:,1]), 'r:',
x, V, 'm--',
linewidth=2.0)
ax.set_xlabel("Position")
ax.set_ylabel("Probability Density")
ax.set_ylim([-rho.max(), rho.max()])
ax.set_xlim([min(x), max(x)])
# the animation function, to be called repeatedly
def animate(i):
# set the new data each frame
line1.set_ydata(rho[:,i])
line2.set_ydata(sp.real(psi[:,i]))
line3.set_ydata(sp.imag(psi[:,i]))
return line1, line2, line3
# the initialization function, useful when blit=True
def init():
line1.set_ydata(sp.ma.array(x, mask=True))
line2.set_ydata(sp.ma.array(x, mask=True))
line3.set_ydata(sp.ma.array(x, mask=True))
return line1, line2, line3
# perform the animation
ani = animation.FuncAnimation(fig, animate, sp.arange(1,Nt),
init_func=init, interval=25, blit=True)
plt.show()
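# Hypothetical usage sketch (added; array shapes are assumptions based on the
# indexing above - psi is expected to hold one column per time step):
#
#   x = sp.linspace(-10, 10, 500)          # position grid
#   t = sp.linspace(0, 1, 200)             # time steps
#   V = sp.zeros_like(x)                   # free-particle potential
#   # psi: complex array of shape (len(x), len(t)) from a TDSE solver
#   # probabilityDensity(x, t, V, psi)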
| gpl-3.0 | 1,779,452,634,299,674,600 | 31.088889 | 74 | 0.607341 | false | 3.266968 | false | false | false |
liumengjun/django-static-precompiler | static_precompiler/templatetags/base.py | 1 | 1478 | import inspect
try:
# Django>=1.9
from django.template import library
except ImportError:
# Django<1.9
from django.template import base as library
def container_tag(register, name=None):
def dec(func):
params, varargs, varkw, defaults = inspect.getargspec(func)
params = params[1:]
tag_name = name or func.__name__
class InlineCompileNode(library.TagHelperNode):
def __init__(self, nodelist, *args):
super(InlineCompileNode, self).__init__(*args)
self.nodelist = nodelist
def render(self, context):
args, kwargs = self.get_resolved_arguments(context)
return func(self.nodelist, *args, **kwargs)
def compile_func(parser, token):
takes_context = True
bits = token.split_contents()[1:]
args, kwargs = library.parse_bits(parser, bits, params, varargs, varkw,
defaults, takes_context, tag_name)
nodelist = parser.parse(('end' + tag_name,))
parser.delete_first_token()
try:
# Django<1.9
return InlineCompileNode(nodelist, takes_context, args, kwargs)
except TypeError:
# Django>=1.9
return InlineCompileNode(nodelist, func, takes_context, args, kwargs)
register.tag(tag_name, compile_func)
return func
return dec
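# Hypothetical usage sketch (added; the tag name and body below are assumptions,
# not part of this module). Because takes_context=True, the wrapped function
# receives the nodelist, then the template context, then its own arguments:
#
#   register = template.Library()
#
#   @container_tag(register)
#   def compileinline(nodelist, context, compiler):
#       return compiler.compile(nodelist.render(context))
#
#   # template usage:  {% compileinline compiler %} ... {% endcompileinline %}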
| mit | 4,570,853,144,020,041,700 | 32.590909 | 85 | 0.567659 | false | 4.492401 | false | false | false |
JustinSGray/Kona | src/kona/linalg/matrices/preconds/nested.py | 1 | 1272 | from kona.linalg.matrices.common import IdentityMatrix
from kona.linalg.matrices.hessian import ReducedKKTMatrix
class NestedKKTPreconditioner(ReducedKKTMatrix):
"""
This object preconditions the KKT system by doing approximate solutions
of the 2nd order adjoints using the PDE preconditioner.
The approximate product using the approximate adjoints are then used in a
nested Krylov solver to produce an inverse estimate.
"""
def _linear_solve(self, rhs_vec, solution, rel_tol=1e-8):
self.dRdU.linearize(self.at_design, self.at_state)
self.dRdU.precond(rhs_vec, solution)
def _adjoint_solve(self, rhs_vec, solution, rel_tol=1e-8):
self.dRdU.linearize(self.at_design, self.at_state)
self.dRdU.T.precond(rhs_vec, solution)
def solve(self, rhs, solution, rel_tol=None):
# make sure we have a krylov solver
if self.krylov is None:
raise AttributeError('krylov solver not set')
# set tolerance
if isinstance(rel_tol, float):
self.krylov.rel_tol = rel_tol
# define the preconditioner
eye = IdentityMatrix()
precond = eye.product
# trigger the solution
self.krylov.solve(self.product, rhs, solution, precond)
| lgpl-3.0 | 340,065,703,489,517,200 | 36.411765 | 77 | 0.683962 | false | 3.553073 | false | false | false |
DeepSOIC/Lattice | latticeBaseFeature.py | 1 | 11471 | #***************************************************************************
#* *
#* Copyright (c) 2015 - Victor Titov (DeepSOIC) *
#* <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="Base feature module for lattice object of lattice workbench for FreeCAD"
__author__ = "DeepSOIC"
__url__ = ""
import FreeCAD as App
import Part
from latticeCommon import *
import latticeCompoundExplorer as LCE
import latticeMarkers
import latticeExecuter
def getDefLatticeFaceColor():
return (1.0, 0.7019608020782471, 0.0, 0.0) #orange
def getDefShapeColor():
clr = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/View").GetUnsigned("DefaultShapeColor")
#convert color in int to color in tuple of 4 floats.
#This is probably implemented already somewhere, but I couldn't find, so I rolled my own --DeepSOIC
# clr in hex looks like this: 0xRRGGBBOO (r,g,b,o = red, green, blue, opacity)
o = clr & 0x000000FFL
b = (clr >> 8) & 0x000000FFL
g = (clr >> 16) & 0x000000FFL
r = (clr >> 24) & 0x000000FFL
return (r/255.0, g/255.0, b/255.0, (255-o)/255.0)
def makeLatticeFeature(name, AppClass, ViewClass):
    '''makeLatticeFeature(name, AppClass, ViewClass): makes a document object for a LatticeFeature-derived object. Pass None as ViewClass to use the default ViewProviderLatticeFeature.'''
obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
AppClass(obj)
if ViewClass:
vp = ViewClass(obj.ViewObject)
else:
vp = ViewProviderLatticeFeature(obj.ViewObject)
return obj
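# Hypothetical usage sketch (added; LatticeDerived stands in for any subclass of
# LatticeFeature defined elsewhere in the workbench):
#
#   obj = makeLatticeFeature('MyArray', LatticeDerived, None)  # default view provider
#   obj.MarkerShape = "tetra-orimarker"
#   App.ActiveDocument.recompute()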
def isObjectLattice(documentObject):
'''isObjectLattice(documentObject): When operating on the object, it is to be treated as a lattice object. If False, treat as a regular shape.'''
ret = False
if hasattr(documentObject,"isLattice"):
if 'On' in documentObject.isLattice:
ret = True
return ret
def getMarkerSizeEstimate(ListOfPlacements):
'''getMarkerSizeEstimate(ListOfPlacements): computes the default marker size for the array of placements'''
if len(ListOfPlacements) == 0:
return 1.0
pathLength = 0
for i in range(1, len(ListOfPlacements)):
pathLength += (ListOfPlacements[i].Base - ListOfPlacements[i-1].Base).Length
sz = pathLength/len(ListOfPlacements)/2.0
#FIXME: make hierarchy-aware
if sz < DistConfusion*10:
sz = 1.0
return sz
class LatticeFeature():
"Base object for lattice objects (arrays of placements)"
def __init__(self,obj):
# please, don't override. Override derivedInit instead.
self.Type = "latticeFeature"
prop = "NumElements"
obj.addProperty("App::PropertyInteger",prop,"Lattice","Info: number of placements in the array")
obj.setEditorMode(prop, 1) # set read-only
obj.addProperty("App::PropertyLength","MarkerSize","Lattice","Size of placement markers (set to zero for automatic).")
obj.addProperty("App::PropertyEnumeration","MarkerShape","Lattice","Choose the preferred shape of placement markers.")
obj.MarkerShape = ["tetra-orimarker","paperplane-orimarker"]
obj.MarkerShape = "paperplane-orimarker" #TODO: setting for choosing the default
obj.addProperty("App::PropertyEnumeration","isLattice","Lattice","Sets whether this object should be treated as a lattice by further operations")
obj.isLattice = ['Auto-Off','Auto-On','Force-Off','Force-On']
# Auto-On an Auto-Off can be modified when recomputing. Force values are going to stay.
#Hidden properties affecting some standard behaviours
prop = "SingleByDesign"
obj.addProperty("App::PropertyBool",prop,"Lattice","Makes the element be populated into object's Placement property")
obj.setEditorMode(prop, 2) # set hidden
self.derivedInit(obj)
obj.Proxy = self
def derivedInit(self, obj):
'''for overriding by derived classes'''
pass
def execute(self,obj):
# please, don't override. Override derivedExecute instead.
plms = self.derivedExecute(obj)
if plms is not None:
obj.NumElements = len(plms)
shapes = []
markerSize = obj.MarkerSize
if markerSize < DistConfusion:
markerSize = getMarkerSizeEstimate(plms)
marker = latticeMarkers.getPlacementMarker(scale= markerSize, markerID= obj.MarkerShape)
#FIXME: make hierarchy-aware
if obj.SingleByDesign:
if len(plms) != 1:
                    latticeExecuter.warning(obj,"Multiple placements are being fed, but object is single by design. Only first placement will be used...")
obj.Shape = marker.copy()
obj.Placement = plms[0]
else:
for plm in plms:
sh = marker.copy()
sh.Placement = plm
shapes.append(sh)
if len(shapes) == 0:
obj.Shape = latticeMarkers.getNullShapeShape(markerSize)
raise ValueError('Lattice object is null') #Feeding empty compounds to FreeCAD seems to cause rendering issues, otherwise it would have been a good idea to output nothing.
sh = Part.makeCompound(shapes)
obj.Shape = sh
if obj.isLattice == 'Auto-Off':
obj.isLattice = 'Auto-On'
else:
# DerivedExecute didn't return anything. Thus we assume it
# has assigned the shape, and thus we don't do anything.
# Moreover, we assume that it is no longer a lattice object, so:
if obj.isLattice == 'Auto-On':
obj.isLattice = 'Auto-Off'
obj.NumElements = len(obj.Shape.childShapes(False,False))
return
def derivedExecute(self,obj):
'''For overriding by derived class. If this returns a list of placements,
it's going to be used to build the shape. If returns None, it is assumed that
derivedExecute has already assigned the shape, and no further actions are needed.
Moreover, None is a signal that the object is not a lattice array, and it will
morph into a non-lattice if isLattice is set to auto'''
return []
def verifyIntegrity(self):
if self.__init__.__func__ is not LatticeFeature.__init__.__func__:
FreeCAD.Console.PrintError("__init__() of lattice object is overridden. Please don't! Fix it!\n")
if self.execute.__func__ is not LatticeFeature.execute.__func__:
FreeCAD.Console.PrintError("execute() of lattice object is overridden. Please don't! Fix it!\n")
def onChanged(self, obj, prop): #prop is a string - name of the property
if prop == 'isLattice':
if obj.ViewObject is not None:
try:
if isObjectLattice(obj):
#obj.ViewObject.DisplayMode = 'Shaded'
obj.ViewObject.ShapeColor = getDefLatticeFaceColor()
obj.ViewObject.Lighting = 'One side'
else:
#obj.ViewObject.DisplayMode = 'Flat Lines'
obj.ViewObject.ShapeColor = getDefShapeColor()
except App.Base.FreeCADError as err:
#these errors pop up while loading project file, apparently because
# viewprovider is up already, but the shape vis mesh wasn't yet
# created. It is safe to ignore them, as DisplayMode is eventually
# restored to the correct values.
#Proper way of dealing with it would have been by testing for
# isRestoring(??), but I failed to find the way to do it.
#--DeepSOIC
pass
class ViewProviderLatticeFeature:
"A View Provider for base lattice object"
def __init__(self,vobj):
'''Don't override. Override derivedInit, please!'''
vobj.Proxy = self
prop = "DontUnhideOnDelete"
vobj.addProperty("App::PropertyBool",prop,"Lattice","Makes the element be populated into object's Placement property")
vobj.setEditorMode(prop, 2) # set hidden
self.derivedInit(vobj)
def derivedInit(self,vobj):
pass
def verifyIntegrity(self):
if self.__init__.__func__ is not ViewProviderLatticeFeature.__init__.__func__:
FreeCAD.Console.PrintError("__init__() of lattice object view provider is overridden. Please don't! Fix it!\n")
def getIcon(self):
return getIconPath("Lattice.svg")
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def setEdit(self,vobj,mode):
return False
def unsetEdit(self,vobj,mode):
return
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def claimChildren(self):
self.Object.Proxy.verifyIntegrity()
self.verifyIntegrity()
return []
def onDelete(self, feature, subelements): # subelements is a tuple of strings
try:
if hasattr(self.ViewObject,"DontUnhideOnDelete") and self.ViewObject.DontUnhideOnDelete:
pass
else:
children = self.claimChildren()
if children and len(children) > 0:
marker = latticeMarkers
for child in children:
child.ViewObject.show()
except Exception as err:
# catch all exceptions, because we don't want to prevent deletion if something goes wrong
FreeCAD.Console.PrintError("Error in onDelete: " + err.message)
return True
| lgpl-2.1 | -9,035,229,541,461,682,000 | 42.954023 | 191 | 0.575277 | false | 4.4513 | false | false | false |
intel-ctrlsys/actsys | actsys/control/diagnostics/mock_diagnostics/mock_diagnostics.py | 1 | 6050 | #
#Copyright (c) 2017 Intel Corp.
#
"""
Interface for all diagnostic tests plugins.
"""
from control.console_log.mock_console_log.ipmi_mock import MockConsoleLog
from control.diagnostics.diagnostics import Diagnostics
from control.plugin import DeclarePlugin
@DeclarePlugin('mock', 100)
class MockDiagnostics(Diagnostics):
"""This class controls launching the inband diagnostic tests
This needs the input of a file """
mock_provision = False
Test_Status = {}
def __init__(self, **kwargs):
Diagnostics.__init__(self, **kwargs)
self.reboot_true = False
self.img = kwargs['diag_image']
self.old_image = None
self.kargs = kwargs['test_name']
self.old_kargs = None
self.console_log = None
self.device = None
self.bmc = None
self.device_name = None
self.plugin_manager = kwargs['plugin_manager']
self.resource_manager = None
self.provisioner = None
self.power_manager = None
def _verify_provisioning(self, device, img):
self.old_image = self.device.get("image")
self.old_kargs = self.device.get("provisioner_kernel_args")
if self.mock_provision is True:
self.provisioner.add(self.device)
self.provisioner.set_image(self.device, img)
try:
device_list = self.provisioner.list()
img_list = self.provisioner.list_images()
except Exception as ex:
raise Exception(
"Error: Failed to read data from provisioner because {0}. No tests will be run.".format(str(ex)))
if device not in device_list or img not in img_list:
raise Exception(
"Error: Device does not exist in provisioner, provision device to continue")
else:
self.old_image = self.device.get("image")
self.old_kargs = self.device.get("provisioner_kernel_args")
def _provision_image(self, img, args):
try:
self.provisioner.set_image(self.device, img)
self.provisioner.set_kernel_args(self.device, args)
except Exception as ex:
raise Exception("Failed to set image {0} or test {1}. Provisioner returned error {2}. "
"Cannot run diagnostics. ".format(img, args, str(ex)))
def _set_node_state(self, state):
result = self.power_manager.set_device_power_state(state)
if result[self.device_name] is not True:
raise Exception("Failed to power {0} node during provisioning "
"diagnostic image. No tests will be run.".format(state))
def launch_diags(self, device, bmc):
"""launches the diagnostic tests"""
self.device = device
result_list = dict()
self.bmc = bmc
self.device_name = self.device.get("hostname")
if self.device.get("provisioner") is None or self.device.get("resource_controller") is None or \
self.device.get("device_power_control") is None:
raise Exception("You are missing the provisioner or resource_controller or device_power_control key in your"
" config file. Please edit the file and try again.")
self.provisioner = self.plugin_manager.create_instance('provisioner', self.device.get("provisioner"))
self.resource_manager = self.plugin_manager.create_instance('resource_control',
self.device.get("resource_controller"))
power_options = self._pack_options()
self.power_manager = self.plugin_manager.create_instance('power_control',
self.device.get("device_power_control"),
**power_options)
if self.device.get("provisioner") in "mock":
self.mock_provision = True
self._verify_provisioning(self.device_name, self.img)
MockDiagnostics.Test_Status[self.device_name] = 'Running'
# Step 1: Remove node from resource pool
dev_l = list()
dev_l.append(self.device_name)
current_state = self.resource_manager.check_nodes_state(dev_l)[1]
if "idle" in current_state:
result = self.resource_manager.remove_nodes_from_resource_pool(dev_l)
if result[0] != 0:
raise Exception(
"Cannot remove node from resource pool for running diagnostics since {0}".format(result[1]))
else:
raise Exception("Cannot remove node from resource pool. {}".format(current_state))
# start console log
self.console_log = MockConsoleLog(self.device_name, '127.0.0.1', 'user', 'password')
console_log_returned, result = self.console_log.start_log_capture('End of Diagnostics', 'Return Code: ')
result_list[self.device_name] = result
# Step 2: Provision diagnostic image
self._provision_image(self.img, self.kargs)
self._set_node_state('Off')
self._set_node_state('On')
# Step 3: Run tests and parse log for completion
# Step 4: Provision node back to old image
if not self.reboot_true:
self._provision_image(self.old_image, self.old_kargs)
self._set_node_state('Off')
self._set_node_state('On')
# Step 5: Add node back to resource pool
result = self.resource_manager.add_nodes_to_resource_pool(dev_l)
if result[0] != 0:
raise Exception("Failed to add node back to resource pool")
return result_list
def _pack_options(self):
"""Return the node power control options based on the node_name and
configuration object."""
options = {}
dev_l = list()
dev_l.append(self.device)
options['device_list'] = dev_l
options['bmc_list'] = [self.bmc]
options['plugin_manager'] = self.plugin_manager
return options
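        # Illustrative shape of the dict built above (values are placeholders):
        #   {'device_list': [<device config>], 'bmc_list': [<bmc config>],
        #    'plugin_manager': <plugin manager instance>}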
| apache-2.0 | 1,918,063,083,031,206,000 | 42.52518 | 120 | 0.600496 | false | 4.115646 | true | false | false |
PyCon/pycon | symposion/proposals/migrations/0012_auto_20180921_1053.py | 1 | 1034 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0011_proposal_created_updated'),
]
operations = [
migrations.AlterField(
model_name='proposalbase',
name='additional_notes',
            field=models.TextField(help_text='Anything else you would like to share with the committee:<br> <b>Please do not submit any personally identifiable information.</b> The initial round of reviews is anonymous, and this field will be visible to reviewers.<br> Speaker public speaking experience.<br> Speaker subject matter experience.<br> Have the speaker(s) given this presentation before elsewhere?<br> Links to recordings, slides, blog posts, code, or other material. <br> Specific needs or special requests \u2014 accessibility, audio (will you need to play pre-recorded sound?), or restrictions on when your talk can be scheduled.', blank=True),
),
]
| bsd-3-clause | -448,724,437,121,979,970 | 53.421053 | 658 | 0.713733 | false | 4.15261 | false | false | false |
5StevenWu/Coursepy | L05/表达式的yield的用途.py | 1 | 1842 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#Final version: this can already find the files that contain the given keyword
'''
grep -rl 'python' /root
'''
import os
def init(func):
def wrapper(*args,**kwargs):
res = func(*args,**kwargs)
next(res)
return res
return wrapper
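# Note: @init primes each coroutine by calling next() once, so callers can
# .send() into it immediately. Without it every stage would need the manual
# two-step, e.g.:
#   o = opener(some_target); next(o); o.send(file_path)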
@init  # decorator that primes the generator/coroutine
def search(target):
    '''Walk the search path and yield the absolute path of every file.'''
    # rewritten as a coroutine: receives search paths via send()
while True:
search_path = yield
g=os.walk(search_path)
for par_dir,_,files in g:
for file in files:
file_abs_path=r'%s\%s'%(par_dir,file)
# print('file_abs_path is ==>: ',file_abs_path)
target.send(file_abs_path)
#g=search()
#d=r'D:\code\py\py3\Coursepy'
#g.send(d)
@init
def opener(target):
while True:
file_abs_path=yield
# print('opener==>: ',file_abs_path)
with open(file_abs_path,encoding='utf-8') as f:
target.send((file_abs_path,f))
# pass
#o=opener()
#o.__next__
#o.send('/2.py')
#g=search(opener())  # pass opener into search so files get opened right inside search
#g.send(d)  # test: send a directory and open the files found there
@init
def cat(target):
    '''Iterate over the lines of each opened file.'''
while True:
file_abs_path,f=yield
for line in f:
#print(line)
# print('file_abs_path & line : ',file_abs_path,line)
target.send((file_abs_path,line))
@init
def grep(target,pattern):
while True:
file_abs_path,line=yield
if pattern in line:
target.send(file_abs_path)
@init
def printer():
while True:
file_abs_path=yield
print(file_abs_path)
# send the search path into the pipeline below
xx=r'D:\code\py\py3\Coursepy\L05\a\b\b'
x=r'D:\code\py\py3\Coursepy\L05\a'
gg=search(opener(cat(grep(printer(),'python'))))
#print(gg)
gg.send(x) | apache-2.0 | 4,955,264,629,553,978,000 | 21.689189 | 64 | 0.567938 | false | 2.482249 | false | false | false |
asi1024/Othbot | main.py | 1 | 3481 | #!/usr/local/bin/python
import Image
import autopy
import os
import time
startX, spanX, startY, spanY = 30, 150, 130, 40
pX, pY = 180, 225
size, span = 30, 56
boxs = size ** 2
starts = spanX * spanY
# draw
def draw_start(im):
for i in range(spanY):
for j in range(spanX):
im.putpixel ((startX + j, startY + i), (0, 0, 0))
def draw_board(im):
for row in range(8):
for col in range(8):
x, y = pX + col * span, pY + row * span
for i in range(size):
for j in range(size):
im.putpixel ((x + j, y + i), (0, 0, 0))
def draw():
os.system("mkdir -p tmp")
os.system("screencapture tmp/sc.png")
im = Image.open("tmp/sc.png")
draw_start(im)
draw_board(im)
im.show()
#take
def take_start(im):
R, G, B = 0, 0, 0
for i in range(spanY):
for j in range(spanX):
r, g, b = im.getpixel ((startX + j, startY + i))
R, G, B = R + r, G + g, B + b
R, G, B = R / starts, G / starts, B / starts
return (R + G + B > 430)
def take_box(im, row, col):
x, y = pX + col * span, pY + row * span
R, G, B = 0, 0, 0
for i in range(size):
for j in range(size):
r, g, b = im.getpixel ((x + j, y + i))
R, G, B = R + r, G + g, B + b
R, G, B = R / boxs, G / boxs, B / boxs
if G - B > 10:
return 0
elif B > 200:
return 3
elif B > 105:
return 4
elif R > 53:
return 2
else:
return 1
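# Inferred meaning of take_box()'s return codes (an assumption based on the
# colour thresholds above and on how run() maps them to 'x'/'o' below):
#   0 = empty square, 1/2 = dark ("x") discs, 3/4 = light ("o") discs.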
def output_data(dat):
for ary in dat:
for i in ary:
print i,
print ""
def board_data(im):
dat = [[0 for i in range(8)] for j in range(8)]
for i in range(8):
for j in range(8):
dat[i][j] = take_box(im, i, j)
return dat
def run():
os.system("mkdir -p tmp")
os.system("screencapture tmp/sc.png")
im = Image.open("tmp/sc.png")
if (take_start(im)):
autopy.mouse.move(startX + spanX / 2, startY + spanY / 2)
time.sleep(2)
autopy.mouse.click()
return
board = board_data(im)
if board[3][3] == 0:
autopy.mouse.move(300, 500)
autopy.mouse.click()
return
flag1, flag2 = 0, 0
for ary in board:
for i in ary:
if i == 4:
flag1 += 1
if i == 2:
flag2 += 1
if flag1 >= 2 or flag2 >= 2:
time.sleep(5)
print "waiting..."
return
if True:
f = open('tmp/input', 'w')
for ary in board:
for i in ary:
if i == 0:
f.write(".")
elif i <= 2:
f.write("x")
elif i <= 4:
f.write("o")
f.write("\n")
f.close()
os.system("./a.out < tmp/input > tmp/output")
x, y = 0, 0
for line in open('tmp/output', 'r'):
items = line.split(' ')
y = int(items[0])
x = int(items[1])
xx = pX + span / 2 + span * x
yy = pY + span / 2 + span * y
autopy.mouse.move(xx, yy)
os.system("screencapture tmp/sc.png")
im = Image.open("tmp/sc.png")
board2 = board_data(im)
if board == board2:
autopy.mouse.click()
time.sleep(1)
def main():
# draw()
# return
os.system("g++-4.9 --std=c++11 src/othello.cpp")
while True:
time.sleep(1)
run()
main()
| mit | 5,038,916,443,684,973,000 | 24.043165 | 65 | 0.460213 | false | 3.058875 | false | false | false |
kdyq007/cmdb-api | lib/ci.py | 1 | 27854 | # -*- coding:utf-8 -*-
import uuid
import time
import datetime
import json
from flask import current_app
from flask import abort
from sqlalchemy import or_
from extensions import db
from extensions import rd
from models.ci import CI
from models.ci_relation import CIRelation
from models.ci_type import CITypeAttribute
from models.ci_type import CITypeCache
from models.ci_type import CITypeSpecCache
from models.history import CIAttributeHistory
from models.attribute import CIAttributeCache
from lib.const import TableMap
from lib.const import type_map
from lib.value import AttributeValueManager
from lib.history import CIAttributeHistoryManger
from lib.history import CIRelationHistoryManager
from lib.query_sql import QUERY_HOSTS_NUM_BY_PRODUCT
from lib.query_sql import QUERY_HOSTS_NUM_BY_BU
from lib.query_sql import QUERY_HOSTS_NUM_BY_PROJECT
from lib.query_sql import QUERY_CIS_BY_IDS
from lib.query_sql import QUERY_CIS_BY_VALUE_TABLE
from tasks.cmdb import ci_cache
from tasks.cmdb import ci_delete
class CIManager(object):
""" manage CI interface
"""
def __init__(self):
pass
def get_ci_by_id(self, ci_id, ret_key="name",
fields=None, need_children=True, use_master=False):
"""@params: `ret_key` is one of 'name', 'id', 'alias'
`fields` is list of attribute name/alias/id
"""
ci = CI.query.get(ci_id) or \
abort(404, "CI {0} is not existed".format(ci_id))
res = dict()
if need_children:
children = self.get_children(ci_id, ret_key=ret_key) # one floor
res.update(children)
ci_type = CITypeCache.get(ci.type_id)
res["ci_type"] = ci_type.type_name
uniq_key = CIAttributeCache.get(ci_type.uniq_id)
if not fields: # fields are all attributes
attr_ids = db.session.query(CITypeAttribute.attr_id).filter_by(
type_id=ci.type_id)
fields = [CIAttributeCache.get(_.attr_id).attr_name
for _ in attr_ids]
if uniq_key.attr_name not in fields:
fields.append(uniq_key.attr_name)
if fields:
value_manager = AttributeValueManager()
_res = value_manager._get_attr_values(
fields, ci_id,
ret_key=ret_key, uniq_key=uniq_key, use_master=use_master)
res.update(_res)
res['_type'] = ci_type.type_id
res['_id'] = ci_id
return res
def get_ci_by_ids(self, ci_id_list, ret_key="name", fields=None):
result = list()
for ci_id in ci_id_list:
res = self.get_ci_by_id(ci_id, ret_key=ret_key, fields=fields)
result.append(res)
return result
def get_children(self, ci_id, ret_key='name', relation_type="contain"):
second_cis = db.session.query(CIRelation.second_ci_id).filter(
CIRelation.first_ci_id == ci_id).filter(or_(
CIRelation.relation_type == relation_type,
CIRelation.relation_type == "deploy"))
second_ci_ids = (second_ci.second_ci_id for second_ci in second_cis)
ci_types = {}
for ci_id in second_ci_ids:
type_id = db.session.query(CI.type_id).filter(
CI.ci_id == ci_id).first().type_id
if type_id not in ci_types:
ci_types[type_id] = [ci_id]
else:
ci_types[type_id].append(ci_id)
res = {}
for type_id in ci_types:
ci_type = CITypeCache.get(type_id)
children = get_cis_by_ids(map(str, ci_types.get(type_id)),
ret_key=ret_key)
res[ci_type.type_name] = children
return res
def get_cis_by_type(self, type_id, ret_key="name", fields="",
page=1, per_page=None):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
cis = db.session.query(CI.ci_id).filter(CI.type_id == type_id)
numfound = cis.count()
cis = cis.offset((page - 1) * per_page).limit(per_page)
res = list()
ci_ids = [str(ci.ci_id) for ci in cis]
if ci_ids:
res = get_cis_by_ids(ci_ids, ret_key, fields)
return numfound, page, res
def ci_is_exist(self, ci_type, unique_key, unique):
table = TableMap(attr_name=unique_key.attr_name).table
unique = db.session.query(table).filter(
table.attr_id == unique_key.attr_id).filter(
table.value == unique).first()
if unique:
return db.session.query(CI).filter(
CI.ci_id == unique.ci_id).first()
def _delete_ci_by_id(self, ci_id):
db.session.query(CI.ci_id).filter(CI.ci_id == ci_id).delete()
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error("delete ci is error, {0}".format(str(e)))
def add(self, ci_type_name, exist_policy="replace",
_no_attribute_policy="ignore", **ci_dict):
ci_existed = False
ci_type = CITypeCache.get(ci_type_name) or \
abort(404, "CIType {0} is not existed".format(ci_type_name))
unique_key = CIAttributeCache.get(ci_type.uniq_id) \
            or abort(400, 'illegal unique attribute')
unique = ci_dict.get(unique_key.attr_name) \
or abort(400, '{0} missing'.format(unique_key.attr_name))
old_ci = self.ci_is_exist(ci_type, unique_key, unique)
if old_ci is not None:
ci_existed = True
if exist_policy == 'reject':
                return abort(400, 'CI already exists')
if old_ci.type_id != ci_type.type_id: # update ci_type
old_ci.type_id = ci_type.type_id
db.session.add(old_ci)
db.session.flush()
ci = old_ci
else:
if exist_policy == 'need':
                return abort(404, 'CI {0} does not exist'.format(unique))
ci = CI()
ci.type_id = ci_type.type_id
_uuid = uuid.uuid4().hex
ci.uuid = _uuid
ci.created_time = datetime.datetime.now()
db.session.add(ci)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error('add CI error: {0}'.format(str(e)))
return abort(400, 'add CI error')
value_manager = AttributeValueManager()
histories = list()
for p, v in ci_dict.items():
ret, res = value_manager.add_attr_value(
p, v, ci.ci_id, ci_type,
_no_attribute_policy=_no_attribute_policy,
ci_existed=ci_existed)
if not ret:
db.session.rollback()
if not ci_existed:
self.delete(ci.ci_id)
current_app.logger.info(res)
return abort(400, res)
if res is not None:
histories.append(res)
try:
db.session.commit()
except Exception as e:
current_app.logger.error(str(e))
db.session.rollback()
if not ci_existed: # only add
self.delete(ci.ci_id)
return abort(400, "add CI error")
his_manager = CIAttributeHistoryManger()
his_manager.add(ci.ci_id, histories)
ci_cache.apply_async([ci.ci_id], queue="cmdb_async")
return ci.ci_id
def update_unique_value(self, ci_id, args):
ci = self.get_ci_by_id(ci_id, need_children=False)
unique_key = ci.get("unique")
attr = CIAttributeCache.get(unique_key)
table_key = "index_{0}".format(attr.value_type) \
if attr.is_index else attr.value_type
value_table = type_map.get("table").get(table_key)
v = args.get(unique_key)
if value_table and v:
item = db.session.query(value_table).filter(
value_table.ci_id == ci_id).filter(
value_table.attr_id == attr.attr_id).first()
if item:
converter = type_map.get("converter").get(attr.value_type)
try:
item.value = converter(v)
except:
return abort(400, "value is illegal")
db.session.add(item)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(str(e))
return abort(400, "update unique failed")
ci_cache.apply_async([ci_id], queue="cmdb_async")
def delete(self, ci_id):
ci = db.session.query(CI).filter(CI.ci_id == ci_id).first()
if ci is not None:
attrs = db.session.query(CITypeAttribute.attr_id).filter(
CITypeAttribute.type_id == ci.type_id).all()
attr_names = []
for attr in attrs:
attr_names.append(CIAttributeCache.get(attr.attr_id).attr_name)
attr_names = set(attr_names)
for attr_name in attr_names:
Table = TableMap(attr_name=attr_name).table
db.session.query(Table).filter(Table.ci_id == ci_id).delete()
db.session.query(CIRelation).filter(
CIRelation.first_ci_id == ci_id).delete()
db.session.query(CIRelation).filter(
CIRelation.second_ci_id == ci_id).delete()
# db.session.query(CIAttributeHistory).filter(
# CIAttributeHistory.ci_id == ci_id).delete()
db.session.flush()
db.session.delete(ci)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error("delete CI error, {0}".format(str(e)))
return abort(400, "delete CI error, {0}".format(str(e)))
# todo: write history
ci_delete.apply_async([ci.ci_id], queue="cmdb_async")
return ci_id
return abort(404, "CI {0} not found".format(ci_id))
def add_heartbeat(self, ci_type, unique):
ci_type = CITypeCache.get(ci_type)
if not ci_type:
return 'error'
uniq_key = CIAttributeCache.get(ci_type.uniq_id)
Table = TableMap(attr_name=uniq_key.attr_name).table
ci_id = db.session.query(Table.ci_id).filter(
Table.attr_id == uniq_key.attr_id).filter(
Table.value == unique).first()
if ci_id is None:
return 'error'
ci = db.session.query(CI).filter(CI.ci_id == ci_id.ci_id).first()
if ci is None:
return 'error'
ci.heartbeat = datetime.datetime.now()
db.session.add(ci)
db.session.commit()
return "ok"
def get_heartbeat(self, page, type_id, agent_status=None):
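        """Return (numfound, [(hostname, first private_ip, ci_type, status)])
        where status is 1 = alive, 0 = heartbeat older than ~72 minutes,
        -1 = never reported."""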
query = db.session.query(CI.ci_id, CI.heartbeat)
expire = datetime.datetime.now() - datetime.timedelta(minutes=72)
if type_id:
query = query.filter(CI.type_id == type_id)
else:
query = query.filter(db.or_(CI.type_id == 7, CI.type_id == 8))
if agent_status == -1:
query = query.filter(CI.heartbeat == None)
elif agent_status == 0:
query = query.filter(CI.heartbeat <= expire)
elif agent_status == 1:
query = query.filter(CI.heartbeat > expire)
numfound = query.count()
per_page_count = current_app.config.get("DEFAULT_PAGE_COUNT")
cis = query.offset((page - 1) * per_page_count).limit(
per_page_count).all()
ci_ids = [ci.ci_id for ci in cis]
heartbeat_dict = {}
for ci in cis:
if agent_status is not None:
heartbeat_dict[ci.ci_id] = agent_status
else:
if ci.heartbeat is None:
heartbeat_dict[ci.ci_id] = -1
elif ci.heartbeat <= expire:
heartbeat_dict[ci.ci_id] = 0
else:
heartbeat_dict[ci.ci_id] = 1
current_app.logger.debug(heartbeat_dict)
ci_ids = map(str, ci_ids)
res = get_cis_by_ids(ci_ids, fields=["hostname", "private_ip"])
result = [(i.get("hostname"), i.get("private_ip")[0], i.get("ci_type"),
heartbeat_dict.get(i.get("_id"))) for i in res
if i.get("private_ip")]
return numfound, result
class CIRelationManager(object):
"""
manage relation between CIs
"""
def __init__(self):
pass
@property
def relation_types(self):
""" all CIType relation types
"""
from lib.const import CI_RELATION_TYPES
return CI_RELATION_TYPES
def get_second_cis(self, first_ci, relation_type="contain",
page=1, per_page=None, **kwargs):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
second_cis = db.session.query(
CI.ci_id).join(
CIRelation, CIRelation.second_ci_id == CI.ci_id).filter(
CIRelation.first_ci_id == first_ci).filter(
CIRelation.relation_type == relation_type)
if kwargs: # special for devices
second_cis = self._query_wrap_for_device(second_cis, **kwargs)
numfound = second_cis.count()
second_cis = second_cis.offset(
(page - 1) * per_page).limit(per_page).all()
ci_ids = [str(son.ci_id) for son in second_cis]
total = len(ci_ids)
result = get_cis_by_ids(ci_ids)
return numfound, total, result
def get_grandsons(self, ci_id, page=1, per_page=None, **kwargs):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
children = db.session.query(CIRelation.second_ci_id).filter(
CIRelation.first_ci_id == ci_id).subquery()
grandsons = db.session.query(CIRelation.second_ci_id).join(
children,
children.c.second_ci_id == CIRelation.first_ci_id).subquery()
grandsons = db.session.query(CI.ci_id).join(
grandsons, grandsons.c.second_ci_id == CI.ci_id)
if kwargs:
grandsons = self._query_wrap_for_device(grandsons, **kwargs)
numfound = grandsons.count()
grandsons = grandsons.offset(
(page - 1) * per_page).limit(per_page).all()
if not grandsons:
return 0, 0, []
ci_ids = [str(son.ci_id) for son in grandsons]
total = len(ci_ids)
result = get_cis_by_ids(ci_ids)
return numfound, total, result
def _sort_handler(self, sort_by, query_sql):
if sort_by.startswith("+"):
sort_type = "asc"
sort_by = sort_by[1:]
elif sort_by.startswith("-"):
sort_type = "desc"
sort_by = sort_by[1:]
else:
sort_type = "asc"
attr = CIAttributeCache.get(sort_by)
if attr is None:
return query_sql
attr_id = attr.attr_id
Table = TableMap(attr_name=sort_by).table
CI_table = query_sql.subquery()
query_sql = db.session.query(CI_table.c.ci_id, Table.value).join(
Table, Table.ci_id == CI_table.c.ci_id).filter(
Table.attr_id == attr_id).order_by(
getattr(Table.value, sort_type)())
return query_sql
def _query_wrap_for_device(self, query_sql, **kwargs):
_type = kwargs.pop("_type", False) or kwargs.pop("type", False) \
or kwargs.pop("ci_type", False)
if _type:
ci_type = CITypeCache.get(_type)
if ci_type is None:
return
query_sql = query_sql.filter(CI.type_id == ci_type.type_id)
for k, v in kwargs.iteritems():
attr = CIAttributeCache.get(k)
if attr is None:
continue
Table = TableMap(attr_name=k).table
CI_table = query_sql.subquery()
query_sql = db.session.query(CI_table.c.ci_id).join(
Table, Table.ci_id == CI_table.c.ci_id).filter(
Table.attr_id == attr.attr_id).filter(
Table.value.ilike(v.replace("*", "%")))
current_app.logger.debug(query_sql)
sort_by = kwargs.pop("sort", False)
if sort_by:
query_sql = self._sort_handler(sort_by, query_sql)
return query_sql
def get_great_grandsons(self, ci_id, page=1, per_page=None, **kwargs):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
children = db.session.query(CIRelation.second_ci_id).filter(
CIRelation.first_ci_id == ci_id).subquery()
grandsons = db.session.query(CIRelation.second_ci_id).join(
children,
children.c.second_ci_id == CIRelation.first_ci_id).subquery()
great_grandsons = db.session.query(CIRelation.second_ci_id).join(
grandsons,
grandsons.c.second_ci_id == CIRelation.first_ci_id).subquery()
great_grandsons = db.session.query(CI.ci_id).join(
great_grandsons, great_grandsons.c.second_ci_id == CI.ci_id)
if kwargs:
great_grandsons = self._query_wrap_for_device(
great_grandsons, **kwargs)
if great_grandsons is None:
return 0, 0, []
numfound = great_grandsons.count()
great_grandsons = great_grandsons.offset(
(page - 1) * per_page).limit(per_page).all()
ci_ids = [str(son.ci_id) for son in great_grandsons]
total = len(ci_ids)
result = get_cis_by_ids(ci_ids)
return numfound, total, result
def get_first_cis(self, second_ci, relation_type="contain",
page=1, per_page=None):
"""only for CI Type
"""
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
first_cis = db.session.query(CIRelation.first_ci_id).filter(
CIRelation.second_ci_id == second_ci).filter(
CIRelation.relation_type == relation_type)
numfound = first_cis.count()
first_cis = first_cis.offset(
(page - 1) * per_page).limit(per_page).all()
result = []
first_ci_ids = [str(first_ci.first_ci_id) for first_ci in first_cis]
total = len(first_ci_ids)
if first_ci_ids:
result = get_cis_by_ids(first_ci_ids)
return numfound, total, result
def get_grandfather(self, ci_id, relation_type="contain"):
"""only for CI Type
"""
grandfather = db.session.query(CIRelation.first_ci_id).filter(
CIRelation.second_ci_id.in_(db.session.query(
CIRelation.first_ci_id).filter(
CIRelation.second_ci_id == ci_id).filter(
CIRelation.relation_type == relation_type))).filter(
CIRelation.relation_type == relation_type).first()
if grandfather:
return CIManager().get_ci_by_id(grandfather.first_ci_id,
need_children=False)
def add(self, first_ci, second_ci, more=None, relation_type="contain"):
ci = db.session.query(CI.ci_id).filter(CI.ci_id == first_ci).first()
if ci is None:
return abort(404, "first_ci {0} is not existed".format(first_ci))
c = db.session.query(CI.ci_id).filter(CI.ci_id == second_ci).first()
if c is None:
return abort(404, "second_ci {0} is not existed".format(
second_ci))
existed = db.session.query(CIRelation.cr_id).filter(
CIRelation.first_ci_id == first_ci).filter(
CIRelation.second_ci_id == second_ci).first()
if existed is not None:
return existed.cr_id
cr = CIRelation()
cr.first_ci_id = first_ci
cr.second_ci_id = second_ci
if more is not None:
cr.more = more
cr.relation_type = relation_type
db.session.add(cr)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error("add CIRelation is error, {0}".format(
str(e)))
return abort(400, "add CIRelation is error, {0}".format(str(e)))
# write history
his_manager = CIRelationHistoryManager()
his_manager.add(cr.cr_id, cr.first_ci_id, cr.second_ci_id,
relation_type, operate_type="add")
return cr.cr_id
def delete(self, cr_id):
cr = db.session.query(CIRelation).filter(
CIRelation.cr_id == cr_id).first()
        if cr is not None:
            cr_id = cr.cr_id
            first_ci = cr.first_ci_id
            second_ci = cr.second_ci_id
            db.session.delete(cr)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(
"delete CIRelation is error, {0}".format(str(e)))
return abort(
400, "delete CIRelation is error, {0}".format(str(e)))
his_manager = CIRelationHistoryManager()
his_manager.add(cr_id, first_ci, second_ci,
cr.relation_type, operate_type="delete")
return True
return abort(404, "CI relation is not existed")
def delete_2(self, first_ci, second_ci):
cr = db.session.query(CIRelation).filter(
CIRelation.first_ci_id == first_ci).filter(
CIRelation.second_ci_id == second_ci).first()
return self.delete(cr.cr_id)
class HostNumStatis(object):
def __init__(self):
pass
def get_hosts_by_project(self, project_id_list=None):
res = {}
if not project_id_list:
project = CITypeCache.get("project")
projects = db.session.query(CI.ci_id).filter(
CI.type_id == project.type_id).all()
project_id_list = (project.ci_id for project in projects)
project_id_list = map(str, project_id_list)
project_ids = ",".join(project_id_list)
nums = db.session.execute(QUERY_HOSTS_NUM_BY_PROJECT.format(
"".join(["(", project_ids, ")"]))).fetchall()
if nums:
for ci_id in project_id_list:
res[int(ci_id)] = 0
for ci_id, num in nums:
res[ci_id] = num
return res
def get_hosts_by_product(self, product_id_list=None):
res = {}
if not product_id_list:
product = CITypeCache.get("product")
products = db.session.query(CI.ci_id).filter(
CI.type_id == product.type_id).all()
product_id_list = (product.ci_id for product in products)
product_id_list = map(str, product_id_list)
product_ids = ",".join(product_id_list)
nums = db.session.execute(QUERY_HOSTS_NUM_BY_PRODUCT.format(
"".join(["(", product_ids, ")"]))).fetchall()
if nums:
for ci_id in product_id_list:
res[int(ci_id)] = 0
for ci_id, num in nums:
res[ci_id] = num
return res
def get_hosts_by_bu(self, bu_id_list=None):
res = {}
if not bu_id_list:
bu = CITypeCache.get("bu")
bus = db.session.query(CI.ci_id).filter(
CI.type_id == bu.type_id).all()
bu_id_list = (bu.ci_id for bu in bus)
bu_id_list = map(str, bu_id_list)
bu_ids = ",".join(bu_id_list)
current_app.logger.debug(QUERY_HOSTS_NUM_BY_BU.format(
"".join(["(", bu_ids, ")"])))
if not bu_ids:
return res
nums = db.session.execute(
QUERY_HOSTS_NUM_BY_BU.format(
"".join(["(", bu_ids, ")"]))).fetchall()
if nums:
for ci_id in bu_id_list:
res[int(ci_id)] = 0
for ci_id, num in nums:
res[ci_id] = num
return res
def get_cis_by_ids(ci_ids, ret_key="name", fields="", value_tables=None):
""" argument ci_ids are string list of CI instance ID, eg. ['1', '2']
"""
if not ci_ids:
return []
start = time.time()
ci_id_tuple = tuple(map(int, ci_ids))
res = rd.get(ci_id_tuple)
if res is not None and None not in res and ret_key == "name":
res = map(json.loads, res)
if not fields:
return res
else:
_res = []
for d in res:
_d = dict()
_d["_id"], _d["_type"] = d.get("_id"), d.get("_type")
_d["ci_type"] = d.get("ci_type")
for field in fields:
_d[field] = d.get(field)
_res.append(_d)
current_app.logger.debug("filter time: %s" % (time.time() - start))
return _res
current_app.logger.warning("cache not hit...............")
if not fields:
_fields = ""
else:
_fields = list()
for field in fields:
attr = CIAttributeCache.get(field)
if attr is not None:
_fields.append(str(attr.attr_id))
_fields = "WHERE A.attr_id in ({0})".format(",".join(_fields))
ci_ids = ",".join(ci_ids)
if value_tables is None:
value_tables = type_map["table_name"].values()
current_app.logger.debug(value_tables)
value_sql = " UNION ".join([QUERY_CIS_BY_VALUE_TABLE.format(value_table,
ci_ids)
for value_table in value_tables])
query_sql = QUERY_CIS_BY_IDS.format(ci_ids, _fields, value_sql)
current_app.logger.debug(query_sql)
start = time.time()
hosts = db.session.execute(query_sql).fetchall()
current_app.logger.info("get cis time is: {0}".format(
time.time() - start))
ci_list = set()
res = list()
ci_dict = dict()
start = time.time()
for ci_id, type_id, attr_id, attr_name, \
attr_alias, value, value_type, is_multivalue in hosts:
if ci_id not in ci_list:
ci_dict = dict()
ci_type = CITypeSpecCache.get(type_id)
ci_dict["_id"] = ci_id
ci_dict["_type"] = type_id
ci_dict["ci_type"] = ci_type.type_name
ci_dict["ci_type_alias"] = ci_type.type_alias
ci_list.add(ci_id)
res.append(ci_dict)
if ret_key == "name":
if is_multivalue:
if isinstance(ci_dict.get(attr_name), list):
ci_dict[attr_name].append(value)
else:
ci_dict[attr_name] = [value]
else:
ci_dict[attr_name] = value
elif ret_key == "alias":
if is_multivalue:
if isinstance(ci_dict.get(attr_alias), list):
ci_dict[attr_alias].append(value)
else:
ci_dict[attr_alias] = [value]
else:
ci_dict[attr_alias] = value
elif ret_key == "id":
if is_multivalue:
if isinstance(ci_dict.get(attr_id), list):
ci_dict[attr_id].append(value)
else:
ci_dict[attr_id] = [value]
else:
ci_dict[attr_id] = value
current_app.logger.debug("result parser time is: {0}".format(
time.time() - start))
return res
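# Illustrative call (field names and values are placeholders; an application/DB
# context and a warm redis cache are assumed):
#   get_cis_by_ids(['1', '2'], fields=["hostname", "private_ip"])
#   -> [{'_id': 1, '_type': 5, 'ci_type': 'server', 'hostname': '...',
#        'private_ip': ['...']}, ...]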
| gpl-2.0 | 297,984,128,190,413,200 | 38.565341 | 79 | 0.539348 | false | 3.522256 | false | false | false |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/fixes.py | 1 | 29568 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# XXX : copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD
from __future__ import division
import collections
from operator import itemgetter
import inspect
import warnings
import numpy as np
import scipy
from scipy import linalg, sparse
from math import ceil, log
from numpy.fft import irfft
from distutils.version import LooseVersion
from functools import partial
from .externals import six
from .externals.six.moves import copyreg, xrange
from gzip import GzipFile
###############################################################################
# Misc
class gzip_open(GzipFile): # python2.6 doesn't have context managing
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
if LooseVersion(np.__version__) < LooseVersion('1.5'):
unique = _unique
else:
unique = np.unique
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if LooseVersion(np.__version__) < LooseVersion('1.6'):
bincount = _bincount
else:
bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False, invert=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
in1d = _in1d
else:
in1d = np.in1d
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
digitize = _digitize
else:
digitize = np.digitize
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
if not hasattr(np, 'tril_indices'):
tril_indices = _tril_indices
else:
tril_indices = np.tril_indices
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
if LooseVersion(np.__version__) < LooseVersion('1.4'):
unravel_index = _unravel_index
else:
unravel_index = np.unravel_index
def _qr_economic_old(A, **kwargs):
"""
Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
with warnings.catch_warnings(record=True):
return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
return linalg.qr(A, mode='economic', **kwargs)
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
qr_economic = _qr_economic_old
else:
qr_economic = _qr_economic_new
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
if hasattr(np, 'count_nonzero'):
from numpy import count_nonzero
else:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
def _meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
meshgrid = _meshgrid
else:
meshgrid = np.meshgrid
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP
>>> print(taps[72:78]) # doctest: +SKIP
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def get_firwin2():
"""Helper to get firwin2"""
try:
from scipy.signal import firwin2
except ImportError:
firwin2 = _firwin2
return firwin2
def _filtfilt(*args, **kwargs):
"""wrap filtfilt, excluding padding arguments"""
from scipy.signal import filtfilt
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return filtfilt(*args, **kwargs)
def get_filtfilt():
"""Helper to get filtfilt from scipy"""
from scipy.signal import filtfilt
if 'padlen' in inspect.getargspec(filtfilt)[0]:
return filtfilt
return _filtfilt
def _get_argrelmax():
try:
from scipy.signal import argrelmax
except ImportError:
argrelmax = _argrelmax
return argrelmax
def _argrelmax(data, axis=0, order=1, mode='clip'):
"""Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
"""
comparator = np.greater
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return np.where(results)
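    # Illustrative example (not from the original source):
    #   _argrelmax(np.array([1, 3, 2, 5, 4]))  ->  (array([1, 3]),)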
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documention, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
if LooseVersion(np.__version__) > '1.7.1':
from numpy.linalg import matrix_rank
else:
matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
copyreg.pickle(partial, _reduce_partial)
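# Illustrative check (assumes Python 2.6, where partial objects are not
# picklable without the registration above):
#   import pickle
#   pickle.loads(pickle.dumps(partial(int, base=2)))('101')  # -> 5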
def normalize_colors(vmin, vmax, clip=False):
"""Helper to handle matplotlib API"""
import matplotlib.pyplot as plt
try:
return plt.Normalize(vmin, vmax, clip=clip)
except AttributeError:
return plt.normalize(vmin, vmax, clip=clip)
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
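    # Illustrative example (not in the original docstring):
    #   _sparse_block_diag([sparse.eye(2), sparse.eye(3)]).shape  ->  (5, 5)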
try:
from scipy.sparse import block_diag as sparse_block_diag
except Exception:
sparse_block_diag = _sparse_block_diag
def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False], dtype=bool)
>>> isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True], dtype=bool)
>>> isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True], dtype=bool)
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = np.core.multiarray.result_type(y, 1.)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond
if LooseVersion(np.__version__) < LooseVersion('1.7'):
isclose = _isclose
else:
isclose = np.isclose
| bsd-3-clause | -7,662,654,578,994,901,000 | 32.297297 | 79 | 0.616883 | false | 3.627085 | false | false | false |
sysopfb/Malware_Scripts | wannacry/decode_dll.py | 1 | 1118 | #For decoded t.wnry file from sample: ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Hash import SHA
import sys
import struct
import binascii
import hashlib
def decode_rsa(privkey, data):
rsa_key = RSA.importKey(privkey)
cipher = PKCS1_v1_5.new(rsa_key)
sentinel = Random.new().read(16)
d = cipher.decrypt(data[::-1],sentinel)
return d
if __name__ == "__main__":
data = open(sys.argv[1],'rb').read()
privkey = open('privkey.der').read()
hdr = data[:8]
data = data[8:]
size = struct.unpack_from('<I', data)[0]
data = data[4:]
blob1 = data[:size]
data = data[size:]
(id, size) = struct.unpack_from('<IQ', data)
data = data[12:]
blob2 = data[:size]
data = data[size:]
if data != '':
print("More data found!")
key = decode_rsa(privkey, blob1)
aes = AES.new(key, AES.MODE_CBC, '\x00'*16)
decoded = aes.decrypt(blob2)
sha256 = hashlib.sha256(decoded).hexdigest()
open(sha256, 'wb').write(decoded)
print("Wrote decoded file to: "+sha256)
| mit | -1,457,900,662,308,675,000 | 25.619048 | 102 | 0.69678 | false | 2.674641 | false | false | false |
BenjaminEHowe/library-api | library_api/implementations/enterprise.py | 1 | 15276 | import re
import requests
from ..library import NotAuthenticatedError
class library:
def __init__(self, url):
""" Initialises the library. """
self.session = requests.Session()
        self.authenticated = False
return
def login(self, userid, password):
""" Authenticates session for future use. """
# find the "formdata" that we need to submit with the login
r = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/patronlogin/')
formdata = re.search('name="t:ac" type="hidden"></input><input value="(.*?)" name="t:formdata" type="hidden">', r.text).group(1)
# log in
postData = {
'j_username': userid,
'j_password': password,
't:formdata': formdata }
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/patronlogin.loginpageform/LIVE?&t:ac=$N', postData)
if "new RedirectAfterLogin('null');" in r.text:
# if we get redirected, the login was successful!
self.authenticated = True
return True
else:
return False
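    # Illustrative usage (library card number and PIN are placeholders):
    #   lib = library('https://rdg.ent.sirsidynix.net.uk/')
    #   if lib.login('12345678', 'secret'):
    #       results = lib.search(title='python')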
def search(self, query=None, title=None, author=None, ean=None):
""" Performs a search, returning a (potentially empty) list of
items. Optionally, search only within the title or author or
EAN / ISBN-13 attributes. """
# perform the search
if query:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?qu=' + query)
elif title:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CTITLE%7C%7C%7CTitle&qu=' + title)
elif author:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CAUTHOR%7C%7C%7CAuthor&qu=' + author)
elif ean:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CISBN%7C%7C%7CISBN&qu=' + ean)
else:
raise ValueError
results = []
# interpret the results
if 'Holds:' in result.text:
# if we got to the page for a single item
# item type, checks are orders so the most likely check to
# pass is done first
if '<div class="displayElementText GENRE_TERM"><a title="Electronic books" alt="Electronic books"' in result.text:
# ebooks also have ISBN so we have to check this first
itemtype = 'ebook'
elif re.search('Loan Type.{0,350}?PERIODICAL', result.text, re.DOTALL):
itemtype = 'journal'
elif '<div class="displayElementText GENRE_TERM"><a title="Electronic journals." alt="Electronic journals."' in result.text:
itemtype = 'ejournal'
elif re.search('Call Number.{0,450}?THESIS(?: -|--)', result.text, re.DOTALL):
# thesis
itemtype = 'academic_paper'
elif re.search('<div class="displayElementText TITLE">.{0,100}?\[cartographic material\]', result.text):
# map / atlas
itemtype = 'map'
elif 'Electronic access:' in result.text:
# electronic resources / cd-rom
itemtype = 'electronic'
elif re.search('<div class="displayElementText TITLE">.{0,100}?\[sound recording\]', result.text):
# sound recording
itemtype = 'audio'
elif re.search('<div class="displayElementText TITLE">.{0,100}?\[videorecording\]', result.text):
# dvd / video casette / visual materials
itemtype = 'video'
elif 'ISBN:' in result.text:
# if it's nothing else and it has an ISBN it's probably a book
itemtype = 'book'
else:
# archive, object, kit
itemtype = 'other'
# find an ID number to use
identifier = None
if itemtype == 'journal' or itemtype == 'ejournal':
try:
identifier = re.search('<div class="displayElementText ISSN_LOCAL">(\d{4}-\d{4})', result.text).group(1)
except AttributeError:
pass
elif itemtype == 'academic_paper':
identifier = re.search('Call Number.{0,4500}?THESIS(?: -|--|-)(R\d{0,6})', result.text, re.DOTALL).group(1)
else:
try:
identifier = re.search('<div class="displayElementText LOCAL_ISBN">(\d{13})', result.text).group(1)
except AttributeError:
# no ISBN-13 / EAN recorded, drop to ISBN-10
try:
identifier = re.search('<div class="displayElementText LOCAL_ISBN">(\d{10})', result.text).group(1)
except AttributeError:
pass
if identifier == None: # if we couldn't find an ISBN / ISSN
identifier = re.search("'ent://SD_ILS/\d{0,8}?/SD_ILS:(\d{0,10}?)'", result.text).group(1)
# title
fulltitle = re.search('<div class="displayElementText TITLE">(.*?)<\/div>', result.text).group(1)
try:
title = re.search('(.*?)(?: :| \/|\.)', fulltitle).group(1)
except AttributeError:
title = fulltitle # if the full title is also the normal title
if ' / ' in fulltitle:
# if the author has been embedded in the title use that
# as it's generally more accurate
author = re.search('.*? (?:\/ by|\/) (.*?)(?: ;|\.$|$)', fulltitle).group(1).split(', ')
elif 'Personal author:' in result.text:
# the personal author generally only includes the first
# author, but it's the best we've got. it also sometimes
# includes the years the author was alive, which is
# annoying
match = re.search('<div class="displayElementText PERSONAL_AUTHOR"><a title="(.*?), (.*?,|.*?\.).*?" alt=', result.text)
first = match.group(2).rstrip(',')
second = match.group(1)
author = [first + ' ' + second]
elif 'Added corporate author' in result.text:
corporate_authors = "".join(re.findall('<div class="displayElementText ADDED_CORPORATE_AUTHOR">(.*?)</div>', result.text))
author = re.findall('<a .*?>(.*?)</a>', corporate_authors)
else:
# not much else we can do other than return unknown
author = ['unknown']
results.append( {
'id': identifier,
'title': title,
'author': author,
'type': itemtype,
} )
elif 'results found' in result.text:
# if we got to a page with lots of results
number_of_results = re.search('(\d{0,7}?) results found', result.text).group(1)
#if number_of_results > 120:
# cap at 120 otherwise getting results could be slow
# number_of_results = 120
print (result.text)
while len(results) < int(number_of_results):
types = re.findall('<div class="displayElementText highlightMe UR_FORMAT"> (.*?)</div>', result.text)
for i in range(len(types)):
# title
fulltitle = re.search('<a id="detailLink' + str(i) + '" title="(.*?)"', result.text).group(1)
print (str(i))
print(fulltitle)
try:
title = re.search('(.*?)(?: :| \/|\.)', fulltitle).group(1)
except AttributeError:
pass # if the full title is also the normal title
if ' / ' in fulltitle:
# if the author has been embedded in the title use that
# as it's generally more accurate
author = re.search('.*? (?:\/ by|\/) (.*?)(?: ;|\.$|$)', fulltitle).group(1).split(', ')
else:
author = ['unknown']
# type
if types[i] == 'Thesis':
itemtype = 'academic_paper'
elif types[i] == 'Sound disc':
itemtype = 'audio'
elif types[i] == 'Book':
if '[electronic resource]' in title:
itemtype = 'ebook'
else:
itemtype = 'book'
elif types[i] == 'Electronic Resources' or types[i] == 'CD-ROM':
itemtype = 'electronic'
elif types[i] == 'Journal':
if '[electronic resource]' in title:
itemtype = 'ejournal'
else:
itemtype = 'journal'
elif types[i] == 'Maps' or types[i] == 'Atlas':
itemtype = 'map'
elif types[i] == 'Printed music':
itemtype = 'audio'
elif types[i] == 'DVD' or types[i] == 'Video casette' or types[i] == 'Visual Materials':
itemtype = 'video'
else:
itemtype = 'other'
# identifier
identifier = None
try:
identifier = re.search('<div id="hitlist' + str(i) + '_ISBN"><div class="ISBN_value">(\d{13})', result.text).group(1)
except AttributeError:
try:
identifier = re.search('<div id="hitlist' + str(i) + '_ISSN"><div class="ISSN_value">(\d\d\d\d-\d\d\d\d)', result.text).group(1)
except AttributeError:
pass
if identifier == None:
identifier = re.search('(\d{0,10})" type="hidden" id="da' + str(i) + '"', result.text).group(1)
results.append( {
'id': identifier,
'title': title,
'author': author,
'type': itemtype,
} )
if len(results) % 12 == 0: # we'll have run out of results, get more
if query:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?qu=' + query + '&rw=' + str(len(results)))
elif title:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CTITLE%7C%7C%7CTitle&qu=' + title + '&rw=' + str(len(results)))
elif author:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CAUTHOR%7C%7C%7CAuthor&qu=' + author + '&rw=' + str(len(results)))
elif ean:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CISBN%7C%7C%7CISBN&qu=' + ean + '&rw=' + str(len(results)))
print (result.text)
return results
def list_items(self):
""" Returns a list of items the borrower has (currently formatted
as a list of enterprise IDs). """
if not self.authenticated:
raise NotAuthenticatedError
r = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account?')
        # for some insane reason it's necessary to get the holds to get an ID to get checkouts...
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.holdsajax/true?', {'t:zoneid': 'holdsAjax'}, headers={'X-Requested-With': 'XMLHttpRequest'})
zoneid = re.search("<div class='hidden t-zone' id='(.*?)'>", r.text).group(1)
# request list of books checked out
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.finesandcheckouts/-1/-1/$B/0/true?', {'t:zoneid': zoneid}, headers={'X-Requested-With': 'XMLHttpRequest'})
books = re.findall('<span>([X\d]{10})<\\\/span>', r.text)
return books
def renew_all(self):
r = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account?')
        # for some insane reason it's necessary to get the holds to get an ID to get checkouts...
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.holdsajax/true?', {'t:zoneid': 'holdsAjax'}, headers={'X-Requested-With': 'XMLHttpRequest'})
zoneid = re.search("<div class='hidden t-zone' id='(.*?)'>", r.text).group(1)
# request list of books checked out
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.finesandcheckouts/-1/-1/$B/0/true?', {'t:zoneid': zoneid}, headers={'X-Requested-With': 'XMLHttpRequest'})
items = self.list_items()
numberOfItems = len(items)
formdata = re.search("<div class='t-invisible'><input value='(.*?)' name='t:formdata' type='hidden'>", r.text).group(1)
listSubmitId = re.search("<input value='submit' class='hidden' id='(.*?)'", r.text).group(1)
# renew items
postData = {
't:formdata': formdata,
't:submit': '["' + listSubmitId + '[","myCheckouts_checkoutslist_submit"]',
't:zoneid': 'checkoutItemsZone'}
for i in range(numberOfItems):
if i == 0: # special case
postData['checkbox'] = 'on'
else:
postData['checkbox_' + str(i-1)] = 'on'
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.checkouts.checkoutslist.form?pc=%7B%22checkoutsList%22%3A%22%22%7D', postData, headers={'X-Requested-With': 'XMLHttpRequest'})
renewalStatus = {}
for item in items:
# check it renewed successfully
if re.search(item + "<\\\/span><br/><span class='checkoutsRenewed'>Renewal succeeded.<\\\/span>", r.text):
renewalStatus[item] = [True]
else:
renewalStatus[item] = [False]
# fix this for "item recalled"
dueDateMatch = re.search(item + ".*?class='checkoutsDueDate'>(\d\d)\/(\d\d)\/(\d\d)<\\\/td>", r.text)
dueDate = '20' + dueDateMatch.group(3) + '-' + dueDateMatch.group(2) + '-' + dueDateMatch.group(1)
renewalStatus[item].append(dueDate)
return renewalStatus
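# Illustrative usage sketch (an addition, not part of the original module):
# the URL and credentials below are placeholders. A typical session
# authenticates, lists the borrower's items and renews them all.
#
#     lib = library('https://rdg.ent.sirsidynix.net.uk')
#     if lib.login('0123456789', 'secret'):
#         print(lib.list_items())
#         print(lib.renew_all())
#     else:
#         print('login failed')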
| mit | -5,243,826,327,986,438,000 | 53.752688 | 224 | 0.515449 | false | 3.889002 | false | false | false |
kyoshino/bedrock | bedrock/releasenotes/tests/test_models.py | 1 | 6304 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from itertools import chain
from django.core.cache import caches
from django.test.utils import override_settings
from mock import call, patch
from pathlib2 import Path
from bedrock.mozorg.tests import TestCase
from bedrock.releasenotes import models
RELEASES_PATH = str(Path(__file__).parent)
release_cache = caches['release-notes']
@patch('bedrock.releasenotes.models.reverse')
class TestReleaseNotesURL(TestCase):
def test_aurora_android_releasenotes_url(self, mock_reverse):
"""
Should return the results of reverse with the correct args
"""
release = models.ProductRelease(channel='Aurora', version='42.0a2', product='Firefox for Android')
assert release.get_absolute_url() == mock_reverse.return_value
mock_reverse.assert_called_with('firefox.android.releasenotes', args=['42.0a2', 'aurora'])
def test_desktop_releasenotes_url(self, mock_reverse):
"""
Should return the results of reverse with the correct args
"""
release = models.ProductRelease(version='42.0', product='Firefox')
assert release.get_absolute_url() == mock_reverse.return_value
mock_reverse.assert_called_with('firefox.desktop.releasenotes', args=['42.0', 'release'])
@override_settings(RELEASE_NOTES_PATH=RELEASES_PATH, DEV=False)
class TestReleaseModel(TestCase):
def setUp(self):
models.ProductRelease.objects.refresh()
release_cache.clear()
def test_release_major_version(self):
rel = models.get_release('firefox', '57.0a1')
assert rel.major_version == '57'
def test_get_bug_search_url(self):
rel = models.get_release('firefox', '57.0a1')
assert '=Firefox%2057&' in rel.get_bug_search_url()
rel.bug_search_url = 'custom url'
assert 'custom url' == rel.get_bug_search_url()
def test_equivalent_release_for_product(self):
"""Based on the test files the equivalent release for 56 should be 56.0.2"""
rel = models.get_release('firefox', '56.0', 'release')
android = rel.equivalent_release_for_product('Firefox for Android')
assert android.version == '56.0.2'
assert android.product == 'Firefox for Android'
def test_equivalent_release_for_product_none_match(self):
rel = models.get_release('firefox', '45.0esr')
android = rel.equivalent_release_for_product('Firefox for Android')
assert android is None
def test_note_fixed_in_release(self):
rel = models.get_release('firefox', '55.0a1')
note = rel.notes[11]
with self.activate('en-US'):
assert note.fixed_in_release.get_absolute_url() == '/en-US/firefox/55.0a1/releasenotes/'
def test_field_processors(self):
rel = models.get_release('firefox', '57.0a1')
# datetime conversion
assert rel.created.year == 2017
# datetime conversion
assert rel.modified.year == 2017
# date conversion
assert rel.release_date.year == 2017
# markdown
assert rel.system_requirements.startswith('<h2 id="windows">Windows</h2>')
# version
assert rel.version_obj.major == 57
# notes
note = rel.notes[0]
# datetime conversion
assert note.created.year == 2017
# datetime conversion
assert note.modified.year == 2017
# markdown
assert note.note.startswith('<p>Firefox Nightly')
assert note.id == 787203
@override_settings(DEV=False)
def test_is_public_query(self):
"""Should not return the release value when DEV is false.
Should also only include public notes."""
assert models.get_release('firefox for android', '56.0.3') is None
rel = models.get_release('firefox', '57.0a1')
assert len(rel.notes) == 4
@override_settings(DEV=True)
def test_is_public_field_processor_dev_true(self):
"""Should always be true when DEV is true."""
models.get_release('firefox for android', '56.0.3')
rel = models.get_release('firefox', '57.0a1')
assert len(rel.notes) == 6
@patch.object(models.ProductRelease, 'objects')
class TestGetRelease(TestCase):
def setUp(self):
release_cache.clear()
def test_get_release(self, manager_mock):
manager_mock.product().get.return_value = 'dude is released'
assert models.get_release('Firefox', '57.0') == 'dude is released'
manager_mock.product.assert_called_with('Firefox', models.ProductRelease.CHANNELS[0], '57.0', False)
def test_get_release_esr(self, manager_mock):
manager_mock.product().get.return_value = 'dude is released'
assert models.get_release('Firefox Extended Support Release', '51.0') == 'dude is released'
manager_mock.product.assert_called_with('Firefox Extended Support Release', 'esr', '51.0', False)
def test_get_release_none_match(self, manager_mock):
"""Make sure the proper exception is raised if no file matches the query"""
manager_mock.product().get.side_effect = models.ProductRelease.DoesNotExist
assert models.get_release('Firefox', '57.0') is None
expected_calls = chain.from_iterable(
(call('Firefox', ch, '57.0', False), call().get()) for ch in models.ProductRelease.CHANNELS)
manager_mock.product.assert_has_calls(expected_calls)
@override_settings(RELEASE_NOTES_PATH=RELEASES_PATH, DEV=False)
class TestGetLatestRelease(TestCase):
def setUp(self):
models.ProductRelease.objects.refresh()
release_cache.clear()
def test_latest_release(self):
correct_release = models.get_release('firefox for android', '56.0.2')
assert models.get_latest_release('firefox for android', 'release') == correct_release
def test_non_public_release_not_duped(self):
# refresh again
models.ProductRelease.objects.refresh()
release_cache.clear()
# non public release
# should NOT raise multiple objects error
assert models.get_release('firefox for android', '56.0.3', include_drafts=True)
| mpl-2.0 | -4,550,439,496,601,802,000 | 39.670968 | 108 | 0.663071 | false | 3.770335 | true | false | false |
adam900710/btrfs-progs | libbtrfsutil/python/setup.py | 1 | 2883 | #!/usr/bin/env python3
# Copyright (C) 2018 Facebook
#
# This file is part of libbtrfsutil.
#
# libbtrfsutil is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libbtrfsutil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with libbtrfsutil. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import os.path
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import subprocess
def get_version():
f = open('../../VERSION', 'r')
version = f.readline().strip()
f.close()
return ".".join(version[1:].split('.'))
def out_of_date(dependencies, target):
dependency_mtimes = [os.path.getmtime(dependency) for dependency in dependencies]
try:
target_mtime = os.path.getmtime(target)
except OSError:
return True
return any(dependency_mtime >= target_mtime for dependency_mtime in dependency_mtimes)
def gen_constants():
with open('../btrfsutil.h', 'r') as f:
btrfsutil_h = f.read()
constants = re.findall(
r'^\s*(BTRFS_UTIL_ERROR_[a-zA-Z0-9_]+)',
btrfsutil_h, flags=re.MULTILINE)
with open('constants.c', 'w') as f:
f.write("""\
#include <btrfsutil.h>
#include "btrfsutilpy.h"
void add_module_constants(PyObject *m)
{
""")
for constant in constants:
assert constant.startswith('BTRFS_UTIL_')
name = constant[len('BTRFS_UTIL_'):]
f.write('\tPyModule_AddIntConstant(m, "{}", {});\n'.format(name, constant))
f.write("""\
}
""")
class my_build_ext(build_ext):
def run(self):
if out_of_date(['../btrfsutil.h'], 'constants.c'):
try:
gen_constants()
except Exception as e:
try:
os.remove('constants.c')
except OSError:
pass
raise e
super().run()
module = Extension(
name='btrfsutil',
sources=[
'constants.c',
'error.c',
'filesystem.c',
'module.c',
'qgroup.c',
'subvolume.c',
],
include_dirs=['..'],
library_dirs=['../..'],
libraries=['btrfsutil'],
)
setup(
name='btrfsutil',
version=get_version(),
description='Library for managing Btrfs filesystems',
url='https://github.com/kdave/btrfs-progs',
license='LGPLv3',
cmdclass={'build_ext': my_build_ext},
ext_modules=[module],
)
| gpl-2.0 | -551,273,148,070,264,200 | 26.198113 | 90 | 0.6188 | false | 3.60375 | false | false | false |
dragondjf/PFramer | objbrowser/app.py | 1 | 7811 | """ Module for IPython event loop integration.
Two things are handled by this module.
1) Creating the QApplication instance (or getting the singleton if it already
   exists). There is no difference here between IPython and the regular Python.
2) Starting the event loop.
If IPython is not running, qApp.exec_() is called, which is blocking.
The IPython.lib.guisupport.start_event_loop_qt4() function is used. If no
event loop is yet running, it will start a blocking event loop. If an event
loop is running, start_event_loop_qt4() will do nothing and return. It is
therefore non-blocking. This makes user interaction from the command
line possible.
The user can start an IPython event loop by calling the '%gui qt' magic command,
by starting IPython with the --qui=qt command line option, or by setting
c.TerminalIPythonApp.gui = 'qt' in ~/.ipython/<profile>/ipython_config.py
See also:
http://ipython.readthedocs.org/en/stable/api/generated/IPython.lib.guisupport.html
Known issues:
1) Starting; ipython --gui=qt main.py
Since this will start a non-blocking event loop before calling main, the
application exits as soon as it is created. Use the IPython -i option to
stay in IPython after the script has finished.
So run: ipython --gui=qt -i main.py
2) PyQt4 has two API versions: Python 2 uses API v1 by default, Python 3
uses v2 (PySide only implements the v2 API). The API version must be set
before PyQt4 is imported!
This program is written for v2 so if v1 is already running, an error will
occur. If you use the ipython --gui=qt command line option to start an
event loop (and make interaction from the command line possible), IPython-2
will start API v1 if PyQt is configured. To force IPython-2 to use the
v2 API, the QT_API environment variable must be set to 'pyqt'.
This works, unfortunately IPython 4.0.0 contains a bug and raises the
following ImportError: No module named qt. As a workaround you can:
1: Ignore the ImportError
2: Import PyQt4 (or PySide) manually. In IPython type: import PyQt4.QtCore
3: Start the event loop with: %gui qt
IPython 5.0.0 and 5.1.0 also contain a bug, so it won't work there either.
See https://github.com/ipython/ipython/issues/9974. It is expected to be fixed
in IPython 5.2.0
"""
import sys, logging, traceback
logger = logging.getLogger(__name__)
from objbrowser.qtpy import QtCore, QtWidgets
from objbrowser.version import DEBUGGING, PROGRAM_NAME
def in_ipython():
""" Returns True if IPython is running, False for the regular Python.
"""
try:
from IPython.core.getipython import get_ipython
except ImportError:
return False
else:
return get_ipython() is not None
def qapp_exists():
""" Returns true if a QApplicaiotn is already running
"""
return QtWidgets.QApplication.instance() is not None
def get_qapp(*args, **kwargs):
""" Gets the global Qt application object. Creates one if it doesn't exist.
"""
qApp = QtWidgets.QApplication.instance()
if qApp:
logger.debug("Returning existing QApplication")
return qApp
else:
logger.debug("Creating new QApplication")
return QtWidgets.QApplication(*args, **kwargs)
def get_qsettings():
""" Creates a QSettings object for this application.
We do not set the application and organization in the QApplication object to
prevent side-effects.
"""
return QtCore.QSettings("titusjan.nl", PROGRAM_NAME)
def start_qt_event_loop(qApp):
""" Starts the eventloop if it's not yet running.
If the IPython event loop is active (and set to Qt) this function does nothing. The IPython
event loop will process Qt events as well so the user can continue to use the command
    prompt together with the ObjectBrowser. Unfortunately this behaviour is broken again in
IPython 5, so there we fall back on the non-interactive event loop.
"""
if in_ipython():
from IPython import version_info
logger.debug("IPython detected. Version info: {}".format(version_info))
if version_info[0] < 4:
logger.debug("Event loop integration not supported for IPython < 4")
elif version_info[0] == 5 and version_info[1] <= 1:
# The is_event_loop_running_qt4 function is broken in IPython 5.0 and 5.1.
# https://github.com/ipython/ipython/issues/9974
logger.debug("Event loop integration does not work in IPython 5.0 and 5.1")
else:
try:
from IPython.lib.guisupport import is_event_loop_running_qt4, start_event_loop_qt4
if is_event_loop_running_qt4(qApp):
logger.info("IPython event loop already running. GUI integration possible.")
else:
# No gui integration
logger.info("Starting (non-interactive) IPython event loop")
start_event_loop_qt4(qApp) # exit code always 0
return
except Exception as ex:
logger.warning("Unable to start IPython Qt event loop: {}".format(ex))
logger.warning("Falling back on non-interactive event loop: {}".format(ex))
logger.info("Starting (non-interactive) event loop")
return qApp.exec_()
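# Minimal launch sketch built on the helpers above (an illustrative addition,
# not part of the original module; 'SomeMainWindow' is a placeholder widget):
#
#     qApp = get_qapp(sys.argv)
#     window = SomeMainWindow()
#     window.show()
#     exit_code = start_qt_event_loop(qApp)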
def handleException(exc_type, exc_value, exc_traceback):
""" Causes the application to quit in case of an unhandled exception (as God intended)
Shows an error dialog before quitting when not in debugging mode.
"""
traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.critical("Bug: uncaught {}".format(exc_type.__name__),
exc_info=(exc_type, exc_value, exc_traceback))
if DEBUGGING:
sys.exit(1)
else:
# Constructing a QApplication in case this hasn't been done yet.
if not QtWidgets.qApp:
_app = QtWidgets.QApplication()
msgBox = ResizeDetailsMessageBox()
msgBox.setText("Bug: uncaught {}".format(exc_type.__name__))
msgBox.setInformativeText(str(exc_value))
lst = traceback.format_exception(exc_type, exc_value, exc_traceback)
msgBox.setDetailedText("".join(lst))
msgBox.setIcon(QtWidgets.QMessageBox.Warning)
msgBox.exec_()
sys.exit(1)
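# A handler like this is typically installed as the global exception hook
# before any windows are created (illustrative addition, not original code):
#
#     sys.excepthook = handleException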
class ResizeDetailsMessageBox(QtWidgets.QMessageBox):
""" Message box that enlarges when the 'Show Details' button is clicked.
    Can be used to better view stack traces. I couldn't find how to make a resizable message
    box but this is the next best thing.
Taken from:
http://stackoverflow.com/questions/2655354/how-to-allow-resizing-of-qmessagebox-in-pyqt4
"""
def __init__(self, detailsBoxWidth=700, detailBoxHeight=300, *args, **kwargs):
""" Constructor
        :param detailsBoxWidth: The width of the details text box (default=700)
        :param detailBoxHeight: The height of the details text box (default=300)
"""
super(ResizeDetailsMessageBox, self).__init__(*args, **kwargs)
self.detailsBoxWidth = detailsBoxWidth
self.detailBoxHeight = detailBoxHeight
def resizeEvent(self, event):
""" Resizes the details box if present (i.e. when 'Show Details' button was clicked)
"""
result = super(ResizeDetailsMessageBox, self).resizeEvent(event)
details_box = self.findChild(QtWidgets.QTextEdit)
if details_box is not None:
#details_box.setFixedSize(details_box.sizeHint())
details_box.setFixedSize(QtCore.QSize(self.detailsBoxWidth, self.detailBoxHeight))
return result
| gpl-3.0 | 3,190,367,816,464,771,600 | 40.547872 | 100 | 0.669953 | false | 4.104572 | false | false | false |
singularityhub/sregistry | shub/apps/logs/mixins.py | 1 | 4621 | """
Adopted from drf-tracking
https://github.com/aschn/drf-tracking
"""
from django.utils.timezone import now
from shub.apps.logs.models import APIRequestLog
from shub.apps.logs.utils import clean_data
from rest_framework.authtoken.models import Token
import traceback
class BaseLoggingMixin(object):
logging_methods = "__all__"
"""Mixin to log requests"""
def initial(self, request, *args, **kwargs):
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
ipaddr = [x.strip() for x in ipaddr.split(",")][0]
else:
ipaddr = request.META.get("REMOTE_ADDR", "")
# get view
view_name = ""
try:
method = request.method.lower()
attributes = getattr(self, method)
view_name = (
type(attributes.__self__).__module__
+ "."
+ type(attributes.__self__).__name__
)
except:
pass
# get the method of the view
if hasattr(self, "action"):
view_method = self.action if self.action else ""
else:
view_method = method.lower()
try:
params = clean_data(request.query_params.dict())
except:
params = {}
# create log
self.request.log = APIRequestLog(
requested_at=now(),
path=request.path,
view=view_name,
view_method=view_method,
remote_addr=ipaddr,
host=request.get_host(),
method=request.method,
query_params=params,
)
# regular initial, including auth check
super(BaseLoggingMixin, self).initial(request, *args, **kwargs)
# add user to log after auth
user = request.user
if user.is_anonymous:
user = None
# Get a user, if auth token is provided
auth_header = request.META.get("HTTP_AUTHORIZATION")
if auth_header:
try:
token = Token.objects.get(key=auth_header.replace("BEARER", "").strip())
user = token.user
except Token.DoesNotExist:
pass
self.request.log.user = user
# get data dict
try:
# Accessing request.data *for the first time* parses the request body, which may raise
# ParseError and UnsupportedMediaType exceptions. It's important not to swallow these,
# as (depending on implementation details) they may only get raised this once, and
# DRF logic needs them to be raised by the view for error handling to work correctly.
self.request.log.data = clean_data(self.request.data.dict())
except AttributeError: # if already a dict, can't dictify
self.request.log.data = clean_data(self.request.data)
def handle_exception(self, exc):
# basic handling
response = super(BaseLoggingMixin, self).handle_exception(exc)
# log error
if hasattr(self.request, "log"):
self.request.log.errors = traceback.format_exc()
# return
return response
def finalize_response(self, request, response, *args, **kwargs):
# regular finalize response
response = super(BaseLoggingMixin, self).finalize_response(
request, response, *args, **kwargs
)
# check if request is being logged
if not hasattr(self.request, "log"):
return response
# compute response time
response_timedelta = now() - self.request.log.requested_at
response_ms = int(response_timedelta.total_seconds() * 1000)
# save to log
if self._should_log(request, response):
self.request.log.response = response.rendered_content
self.request.log.status_code = response.status_code
self.request.log.response_ms = response_ms
self.request.log.save()
return response
def _should_log(self, request, response):
"""
Method that should return True if this request should be logged.
By default, check if the request method is in logging_methods.
"""
return (
self.logging_methods == "__all__" or request.method in self.logging_methods
)
class LoggingMixin(BaseLoggingMixin):
pass
class LoggingErrorsMixin(BaseLoggingMixin):
"""Log only errors"""
def _should_log(self, request, response):
return response.status_code >= 400
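# Illustrative usage sketch (an addition, not part of the original module):
# the mixins are meant to be combined with DRF views; the view, queryset and
# serializer names below are placeholders.
#
#     class ContainerViewSet(LoggingMixin, viewsets.ModelViewSet):
#         queryset = Container.objects.all()
#         serializer_class = ContainerSerializer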
| mpl-2.0 | -5,573,458,890,350,275,000 | 30.222973 | 98 | 0.58667 | false | 4.347131 | false | false | false |
sbillaudelle/labbook | labbook/cli.py | 1 | 2486 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
__all__ = ['Interface', 'main']
import os
import datetime
from .exceptions import *
from .labbook import LabBook
class Interface(object):
def __init__(self):
pass
def create(self, args):
path = os.path.abspath(args.path)
try:
labbook = LabBook.create(path)
except LabBookAlreadyExistsError as exc:
print(exc.message)
else:
print("I created a labbook for you in '{0}'. Get to work!".format(labbook.path))
def run(self, args):
path = os.getcwd()
try:
labbook = LabBook(path)
except LabBookNotFoundError as exc:
print(exc.message)
else:
labbook.run(args.command_line)
def log(self, args):
path = os.getcwd()
try:
labbook = LabBook(path)
except LabBookNotFoundError as exc:
print(exc.message)
else:
for experiment in labbook.log():
print("{date}: {cmd} ({uuid})".format(
date = datetime.datetime.fromtimestamp(float(experiment.date)).strftime('%a %b %d %H:%M:%S %Y'),
cmd = experiment.command_line,
uuid = experiment.uuid
))
if experiment.comment:
print("\n {0}\n".format(experiment.comment))
else:
print("\n (no comment)\n")
def comment(self, args):
path = os.getcwd()
try:
labbook = LabBook(path)
except LabBookNotFoundError as exc:
print(exc.message)
else:
try:
labbook.set_comment(args.uuid, args.comment)
except (UUIDNotFoundError, AmbiguousUUIDError) as exc:
print(exc.message)
def main():
import argparse
parser = argparse.ArgumentParser()
sub = parser.add_subparsers(dest='command')
run_parser = sub.add_parser('run')
run_parser.add_argument('command_line', type=str, nargs='+')
log_parser = sub.add_parser('log')
log_parser = sub.add_parser('create')
log_parser.add_argument('path', type=str)
log_parser = sub.add_parser('comment')
log_parser.add_argument('uuid', type=str, nargs='?')
log_parser.add_argument('comment', type=str)
args = parser.parse_args()
interface = Interface()
getattr(interface, args.command)(args)
| gpl-2.0 | 271,354,617,792,468,030 | 28.247059 | 120 | 0.54827 | false | 4.042276 | false | false | false |
frhumanes/consulting | web/deploy/wtdeploy/wtdeploy/modules/fab_mysql.py | 1 | 2073 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
#
# author: javi santana
from fabric.api import *
def install_mysql():
""" ripped from http://www.muhuk.com/2010/05/how-to-install-mysql-with-fabric/
"""
with settings(hide('warnings', 'stderr'), warn_only=True):
result = sudo('dpkg-query --show mysql-server')
if result.failed is False:
warn('MySQL is already installed')
return
mysql_password = env.database_admin_pass
sudo('echo "mysql-server-5.5 mysql-server/root_password password ' \
'%s" | debconf-set-selections' % mysql_password)
sudo('echo "mysql-server-5.5 mysql-server/root_password_again password ' \
'%s" | debconf-set-selections' % mysql_password)
sudo('apt-get install -y mysql-server')
def install(conf_folder):
install_mysql()
sudo("apt-get -y install mysql-client libmysqlclient-dev") # dev libraries for compile python bindings
def copy_conf_files(conf_folder):
pass
def create_database(name, encoding='utf8'):
with settings(warn_only=True):
run_mysql_sudo('create database %s character set %s' % (name, encoding))
def set_password(user, password):
sudo("mysqladmin -u %s password %s" % (user, password))
def create_user(user, password):
run_mysql_sudo("CREATE USER %s IDENTIFIED BY '%s'" % (user, password))
def drop_user(user):
with settings(warn_only=True):
run_mysql_sudo("DROP USER %s" % (user))
def user_perms(user, database, password):
run_mysql_sudo("GRANT ALL ON %s.* TO %s@'localhost' IDENTIFIED BY '%s'" % (database, user, password))
def run_mysql_sudo(cmd):
run('echo "' + cmd + '" | mysql -u%(database_admin)s -p%(database_admin_pass)s' % env)
def get_dump(name, user, password, where):
    # TODO: make a temporary file
run("mysqldump -u%s -p%s %s | gzip > /tmp/db_dump.sql.gz" % (user, password, name));
get("/tmp/db_dump.sql.gz", where)
def drop_database():
with settings(warn_only=True):
run_mysql_sudo("DROP DATABASE %s" % env.database_name)
| apache-2.0 | -271,757,417,475,889,120 | 34.741379 | 105 | 0.641582 | false | 3.249216 | false | false | false |
jimmy201602/webterminal | permission/forms.py | 1 | 4283 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.models import Permission as AuthPermission
from permission.models import Permission
from django.contrib.contenttypes.models import ContentType
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Field
from django.db import models
from django.forms.widgets import CheckboxSelectMultiple
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
class RegisterForm(forms.Form):
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2'
self.helper.field_class = 'col-md-8'
self.helper.layout = Layout(*[Div(field, css_class='form-group')
for field in ['user', 'newpassword1', 'newpassword2', 'email']])
self.instance = False
if 'instance' in kwargs.keys():
kwargs.pop('instance')
self.instance = True
super(RegisterForm, self).__init__(*args, **kwargs)
user = forms.CharField(
required=True,
label=_(u"user name"),
error_messages={'required': _(u'Please input a valid user.')},
max_length=100,
widget=forms.TextInput(
attrs={
'class': u"form-control",
}
)
)
newpassword1 = forms.CharField(
required=True,
label=_(u"your password"),
error_messages={'required': _(u'Please input your password')},
widget=forms.PasswordInput(
attrs={
'placeholder': _(u"new password"),
'class': u"form-control",
}
)
)
newpassword2 = forms.CharField(
required=True,
label=_(u"verify your password"),
error_messages={'required': _(u'please input your password again')},
widget=forms.PasswordInput(
attrs={
'placeholder': _(u"verify your password"),
'class': u"form-control",
}
)
)
email = forms.EmailField(
required=True,
label=_(u"email"),
error_messages={'required': _(
u'Please input a valid email address.')},
widget=forms.EmailInput(
attrs={
'class': u"form-control",
}
)
)
def clean(self):
if not self.is_valid():
raise forms.ValidationError({'user': _(u"every filed required")})
elif self.cleaned_data['newpassword1'] != self.cleaned_data['newpassword2']:
raise forms.ValidationError({'newpassword1': _(
u"your password does't the same"), 'newpassword2': _(u"your password does't the same")})
elif self.cleaned_data['user']:
if not self.instance:
if User.objects.filter(username=self.cleaned_data['user']):
raise forms.ValidationError(
{'user': _(u"User name has been registered!")})
cleaned_data = super(RegisterForm, self).clean()
return cleaned_data
class CustomModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return force_text(_(obj.name))
class PermissionForm(forms.ModelForm):
permissions = CustomModelMultipleChoiceField(queryset=AuthPermission.objects.
filter(content_type__app_label__in=[
'common', 'permission'], codename__contains='can_'),
widget=forms.CheckboxSelectMultiple())
def __init__(self, *args, **kwargs):
super(PermissionForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2'
self.helper.field_class = 'col-md-8'
self.helper.layout = Layout(*[Div(field, css_class='form-group')
for field in ['user', 'permissions', 'groups']])
class Meta:
model = Permission
fields = ['user', 'permissions', 'groups']
| gpl-3.0 | 4,408,388,683,405,748,700 | 37.585586 | 108 | 0.573663 | false | 4.452183 | false | false | false |
haeihaiehaei/Python-Projects | vmware/op5_vcsa_plugin/check_vcsa.py | 1 | 4239 | #!/usr/bin/python
"""
Op5 check to get the health of the VCenter Appliance via REST API.
Copyright 2017 Martin Persson
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
author = 'Martin Persson'
url = 'https://github.com/haeihaiehaei/Python-Projects/blob/master/vmware/op5_vcsa_plugin/check_vcsa.py'
version = '0.1'
try:
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import json
import sys
import argparse
import base64
except ImportError:
print "Error: missing one of the libraries (requests, json, sys, argparse, base64)"
sys.exit()
# Disable the unverified HTTPS warnings. We are not running certificates.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Handle our arguments here.
parser = argparse.ArgumentParser(
description=__doc__,
epilog='Developed by %s - For more information see: "%s"'
% (author, url))
parser.add_argument('-u', '--username', dest='username', required=True, help='Username, ex administrator')
parser.add_argument('-p', '--password', dest='password', required=True, help='Password for the user')
parser.add_argument('-d', '--domain', dest='domain', required=True, help='domain name for the vcenter')
parser.add_argument('-U', '--url', dest='url', required=True, help='url to the vcenter')
parser.add_argument('-c', '--check', dest='check', required=True, help='what to check; the following are available: database-storage, load, mem, storage')
parser.add_argument('-v', '--version', action='version', version='%(prog)s (version 0.16)')
args = parser.parse_args()
def login():
credentials = str(args.username) + ":" + str(args.password)
# To send the authentication header we need to convert it to Base64.
b64credentials = "Basic" + " " + base64.b64encode(credentials)
url = "https://" + str(args.url) + "." + str(args.domain) + "/rest/com/vmware/cis/session"
payload = ""
headers = {
'content-type': "application/json",
'authorization': b64credentials,
}
# Set the session_id to a global variable so we can use it later.
global session_id
session = requests.request("POST", url, data=payload, headers=headers, verify=False)
session_id = json.loads(session.text)['value']
def health_check():
url = "https://" + str(args.url) + "." + str(args.domain) + "/rest/appliance/health/" + str(args.check)
headers = {
'vmware-api-session-id': "%s" % session_id
}
response = requests.request("GET", url, headers=headers, verify=False)
value = json.loads(response.text)['value']
if value == 'green':
print('OK')
logout()
sys.exit(0)
elif value == 'yellow':
print('Warning')
sys.exit(1)
elif value == 'orange':
print('Warning')
sys.exit(1)
elif value == 'red':
print('Critical.')
logout()
sys.exit(2)
def logout():
url = "https://" + str(args.url) + "." + str(args.domain) + "/rest/com/vmware/cis/session"
headers = {
'vmware-api-session-id': "%s" % session_id
}
logout_value = requests.request("DELETE", url, headers=headers, verify=False)
print(logout_value.text)
login()
health_check()
logout()
| gpl-3.0 | -1,029,411,036,243,976,400 | 38.25 | 460 | 0.689314 | false | 3.836199 | false | false | false |
QinerTech/QinerApps | openerp/addons/website_blog/models/website_blog.py | 2 | 12097 | # -*- coding: utf-8 -*-
from datetime import datetime
import lxml
import random
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.translate import html_translate
class Blog(osv.Model):
_name = 'blog.blog'
_description = 'Blogs'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = 'name'
_columns = {
'name': fields.char('Blog Name', required=True, translate=True),
'subtitle': fields.char('Blog Subtitle', translate=True),
}
def all_tags(self, cr, uid, ids, min_limit=1, context=None):
req = """
SELECT
p.blog_id, count(*), r.blog_tag_id
FROM
blog_post_blog_tag_rel r
join blog_post p on r.blog_post_id=p.id
WHERE
p.blog_id in %s
GROUP BY
p.blog_id,
r.blog_tag_id
ORDER BY
count(*) DESC
"""
cr.execute(req, [tuple(ids)])
tag_by_blog = {i: [] for i in ids}
for blog_id, freq, tag_id in cr.fetchall():
if freq >= min_limit:
tag_by_blog[blog_id].append(tag_id)
tag_obj = self.pool['blog.tag']
for blog_id in tag_by_blog:
tag_by_blog[blog_id] = tag_obj.browse(cr, uid, tag_by_blog[blog_id], context=context)
return tag_by_blog
class BlogTag(osv.Model):
_name = 'blog.tag'
_description = 'Blog Tag'
_inherit = ['website.seo.metadata']
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
'post_ids': fields.many2many(
'blog.post', string='Posts',
),
}
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class BlogPost(osv.Model):
_name = "blog.post"
_description = "Blog Post"
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_order = 'id DESC'
_mail_post_access = 'read'
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = super(BlogPost, self)._website_url(cr, uid, ids, field_name, arg, context=context)
for blog_post in self.browse(cr, uid, ids, context=context):
res[blog_post.id] = "/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post))
return res
def _compute_ranking(self, cr, uid, ids, name, arg, context=None):
res = {}
for blog_post in self.browse(cr, uid, ids, context=context):
age = datetime.now() - datetime.strptime(blog_post.create_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
res[blog_post.id] = blog_post.visits * (0.5+random.random()) / max(3, age.days)
return res
def _default_content(self, cr, uid, context=None):
return ''' <div class="container">
<section class="mt16 mb16">
<p class="o_default_snippet_text">''' + _("Start writing here...") + '''</p>
</section>
</div> '''
_columns = {
'name': fields.char('Title', required=True, translate=True),
'subtitle': fields.char('Sub Title', translate=True),
'author_id': fields.many2one('res.partner', 'Author'),
'cover_properties': fields.text('Cover Properties'),
'blog_id': fields.many2one(
'blog.blog', 'Blog',
required=True, ondelete='cascade',
),
'tag_ids': fields.many2many(
'blog.tag', string='Tags',
),
'content': fields.html('Content', translate=html_translate, sanitize=False),
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', '&', ('model', '=', self._name), ('message_type', '=', 'comment'), ('path', '=', False)
],
string='Website Messages',
help="Website communication history",
),
# creation / update stuff
'create_date': fields.datetime(
'Created on',
select=True, readonly=True,
),
'create_uid': fields.many2one(
'res.users', 'Author',
select=True, readonly=True,
),
'write_date': fields.datetime(
'Last Modified on',
select=True, readonly=True,
),
'write_uid': fields.many2one(
'res.users', 'Last Contributor',
select=True, readonly=True,
),
'author_avatar': fields.related(
'author_id', 'image_small',
string="Avatar", type="binary"),
'visits': fields.integer('No of Views'),
'ranking': fields.function(_compute_ranking, string='Ranking', type='float'),
}
_defaults = {
'name': '',
'content': _default_content,
'cover_properties': '{"background-image": "none", "background-color": "oe_none", "opacity": "0.6", "resize_class": ""}',
'author_id': lambda self, cr, uid, ctx=None: self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id,
}
def html_tag_nodes(self, html, attribute=None, tags=None, context=None):
""" Processing of html content to tag paragraphs and set them an unique
ID.
:return result: (html, mappin), where html is the updated html with ID
and mapping is a list of (old_ID, new_ID), where old_ID
is None is the paragraph is a new one. """
existing_attributes = []
mapping = []
if not html:
return html, mapping
if tags is None:
tags = ['p']
if attribute is None:
attribute = 'data-unique-id'
# form a tree
root = lxml.html.fragment_fromstring(html, create_parent='div')
if not len(root) and root.text is None and root.tail is None:
return html, mapping
        # walk all nodes and tag the requested ones (by default <p> elements)
        # with a unique attribute value derived from their ancestor tags
for node in root.iter():
if node.tag not in tags:
continue
ancestor_tags = [parent.tag for parent in node.iterancestors()]
old_attribute = node.get(attribute)
new_attribute = old_attribute
if not new_attribute or (old_attribute in existing_attributes):
if ancestor_tags:
ancestor_tags.pop()
counter = random.randint(10000, 99999)
ancestor_tags.append('counter_%s' % counter)
new_attribute = '/'.join(reversed(ancestor_tags))
node.set(attribute, new_attribute)
existing_attributes.append(new_attribute)
mapping.append((old_attribute, new_attribute))
html = lxml.html.tostring(root, pretty_print=False, method='html')
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if html.startswith('<div>') and html.endswith('</div>'):
html = html[5:-6]
return html, mapping
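    # For illustration (an added note, not in the original): on a small
    # fragment each matching tag gains a generated attribute value and the
    # returned mapping pairs the old id (None for new paragraphs) with the
    # new one; 'blog_post_model' below stands for the blog.post model and the
    # counter value is random, so the output is only approximate:
    #
    #     html, mapping = blog_post_model.html_tag_nodes(
    #         '<p>intro</p>', attribute='data-chatter-id')
    #     # html    -> '<p data-chatter-id="counter_12345">intro</p>'
    #     # mapping -> [(None, 'counter_12345')]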
def _postproces_content(self, cr, uid, id, content=None, context=None):
if content is None:
content = self.browse(cr, uid, id, context=context).content
if content is False:
return content
content, mapping = self.html_tag_nodes(content, attribute='data-chatter-id', tags=['p'], context=context)
if id: # not creating
existing = [x[0] for x in mapping if x[0]]
msg_ids = self.pool['mail.message'].search(cr, SUPERUSER_ID, [
('res_id', '=', id),
('model', '=', self._name),
('path', 'not in', existing),
('path', '!=', False)
], context=context)
self.pool['mail.message'].unlink(cr, SUPERUSER_ID, msg_ids, context=context)
return content
def _check_for_publication(self, cr, uid, ids, vals, context=None):
if vals.get('website_published'):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
for post in self.browse(cr, uid, ids, context=context):
post.blog_id.message_post(
body='<p>%(post_publication)s <a href="%(base_url)s/blog/%(blog_slug)s/post/%(post_slug)s">%(post_link)s</a></p>' % {
'post_publication': _('A new post %s has been published on the %s blog.') % (post.name, post.blog_id.name),
'post_link': _('Click here to access the post.'),
'base_url': base_url,
'blog_slug': slug(post.blog_id),
'post_slug': slug(post),
},
subtype='website_blog.mt_blog_blog_published')
return True
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'content' in vals:
vals['content'] = self._postproces_content(cr, uid, None, vals['content'], context=context)
create_context = dict(context, mail_create_nolog=True)
post_id = super(BlogPost, self).create(cr, uid, vals, context=create_context)
self._check_for_publication(cr, uid, [post_id], vals, context=context)
return post_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if 'content' in vals:
vals['content'] = self._postproces_content(cr, uid, ids[0], vals['content'], context=context)
result = super(BlogPost, self).write(cr, uid, ids, vals, context)
self._check_for_publication(cr, uid, ids, vals, context=context)
return result
def get_access_action(self, cr, uid, ids, context=None):
""" Override method that generated the link to access the document. Instead
of the classic form view, redirect to the post on the website directly """
post = self.browse(cr, uid, ids[0], context=context)
return {
'type': 'ir.actions.act_url',
'url': '/blog/%s/post/%s' % (post.blog_id.id, post.id),
'target': 'self',
            'res_id': post.id,
}
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
""" Override to set the access button: everyone can see an access button
on their notification email. It will lead on the website view of the
post. """
res = super(BlogPost, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
access_action = self._notification_link_helper('view', model=message.model, res_id=message.res_id)
for category, data in res.iteritems():
res[category]['button_access'] = {'url': access_action, 'title': _('View Blog Post')}
return res
class Website(osv.Model):
_inherit = "website"
def page_search_dependencies(self, cr, uid, view_id, context=None):
dep = super(Website, self).page_search_dependencies(cr, uid, view_id, context=context)
post_obj = self.pool.get('blog.post')
view = self.pool.get('ir.ui.view').browse(cr, uid, view_id, context=context)
name = view.key.replace("website.", "")
fullname = "website.%s" % name
dom = [
'|', ('content', 'ilike', '/page/%s' % name), ('content', 'ilike', '/page/%s' % fullname)
]
posts = post_obj.search(cr, uid, dom, context=context)
if posts:
page_key = _('Blog Post')
dep[page_key] = []
for p in post_obj.browse(cr, uid, posts, context=context):
dep[page_key].append({
'text': _('Blog Post <b>%s</b> seems to have a link to this page !') % p.name,
'link': p.website_url
})
return dep
| gpl-3.0 | 4,718,703,543,692,162,000 | 39.868243 | 137 | 0.55427 | false | 3.824534 | false | false | false |
nhuntwalker/rational_whimsy | rational_whimsy/blog/models.py | 1 | 2123 | """The Blog Post model."""
from django.db import models
from django.dispatch import receiver
from django.db.models.signals import post_save
from redactor.fields import RedactorField
from taggit.managers import TaggableManager
# Create your models here.
PUBLICATION_STATUS = (
("published", "Published"),
("draft", "Draft"),
("private", "Private")
)
class PostManager(models.Manager):
"""Retrieve all the published posts in reverse date order."""
def get_queryset(self):
"""Alter the queryset returned."""
return super(
PostManager,
self
).get_queryset().filter(status="published").order_by("-published_date")
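# For example (an added note, not part of the original module): once the Post
# model below wires this up as `published`, call sites can choose between
#
#     Post.published.all()   # only status == "published", newest first
#     Post.objects.all()     # the default manager, every post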
class Post(models.Model):
"""The model for an individual blog post."""
title = models.CharField(name="title", max_length=255)
cover_img = models.ImageField(upload_to="post_covers", default="post_covers/stock-cover.jpg")
body = RedactorField(verbose_name="body")
created = models.DateTimeField(name="created", auto_now_add=True)
published_date = models.DateTimeField(
name="published_date",
blank=True,
null=True
)
modified = models.DateTimeField(name="modified", auto_now=True)
slug = models.SlugField(max_length=255, unique=True)
status = models.CharField(
name="status", choices=PUBLICATION_STATUS,
default="draft", max_length=20)
featured = models.BooleanField(default=False)
objects = models.Manager()
published = PostManager()
tags = TaggableManager()
def __str__(self):
"""The string representation of the object."""
return self.title
@receiver(post_save, sender=Post)
def unfeature_posts(sender, **kwargs):
"""Reset feature status when saved post is featured.
When a post is saved (either added or edited), if it's checked as being
featured then make every/any other featured post unfeatured.
"""
if kwargs["instance"].featured:
other_posts = Post.objects.exclude(pk=kwargs["instance"].pk)
for post in other_posts:
post.featured = False
post.save()
| mit | -6,685,609,270,705,033,000 | 30.220588 | 97 | 0.666039 | false | 4.06705 | false | false | false |
nihilus/epanos | pyc_fmtstr_parser/printf_parse.py | 1 | 12979 | # ported from gnulib rev be7d73709d2b3bceb987f1be00a049bb7021bf87
#
# Copyright (C) 2014, Mark Laws.
# Copyright (C) 1999, 2002-2003, 2005-2007, 2009-2014 Free Software
# Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import ctypes
from flufl.enum import Enum
sizeof = ctypes.sizeof
Arg_type = Enum('Arg_type', [str(x.strip()) for x in '''
TYPE_NONE
TYPE_SCHAR
TYPE_UCHAR
TYPE_SHORT
TYPE_USHORT
TYPE_INT
TYPE_UINT
TYPE_LONGINT
TYPE_ULONGINT
TYPE_LONGLONGINT
TYPE_ULONGLONGINT
TYPE_DOUBLE
TYPE_LONGDOUBLE
TYPE_CHAR
TYPE_WIDE_CHAR
TYPE_STRING
TYPE_WIDE_STRING
TYPE_POINTER
TYPE_COUNT_SCHAR_POINTER
TYPE_COUNT_SHORT_POINTER
TYPE_COUNT_INT_POINTER
TYPE_COUNT_LONGINT_POINTER
TYPE_COUNT_LONGLONGINT_POINTER
'''.splitlines() if x != ''])
FLAG_GROUP = 1 # ' flag
FLAG_LEFT = 2 # - flag
FLAG_SHOWSIGN = 4 # + flag
FLAG_SPACE = 8 # space flag
FLAG_ALT = 16 # # flag
FLAG_ZERO = 32
# arg_index value indicating that no argument is consumed.
ARG_NONE = ~0
class Argument(object):
__slots__ = ['type', 'data']
class Arguments(object):
__slots__ = ['count', 'arg']
def __init__(self):
self.count = 0
self.arg = []
class Directive(object):
'''A parsed directive.'''
__slots__ = ['dir_start', 'dir_end', 'flags', 'width_start', 'width_end',
'width_arg_index', 'precision_start', 'precision_end',
'precision_arg_index', 'conversion', 'arg_index']
# conversion: d i o u x X f F e E g G a A c s p n U % but not C S
def __init__(self):
self.flags = 0
self.width_start = None
self.width_end = None
self.width_arg_index = ARG_NONE
self.precision_start = None
self.precision_end = None
self.precision_arg_index = ARG_NONE
self.arg_index = ARG_NONE
class Directives(object):
'''A parsed format string.'''
__slots__ = ['count', 'dir', 'max_width_length', 'max_precision_length']
def __init__(self):
self.count = 0
self.dir = []
def REGISTER_ARG(a, index, type):
n = index
while a.count <= n:
try:
a.arg[a.count]
except IndexError:
a.arg.append(Argument())
a.arg[a.count].type = Arg_type.TYPE_NONE
a.count += 1
if a.arg[n].type == Arg_type.TYPE_NONE:
a.arg[n].type = type
elif a.arg[n].type != type:
raise ValueError('ambiguous type for positional argument')
def conv_signed(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_LONGLONGINT
else:
# If 'long long' exists and is the same as 'long', we parse "lld" into
# TYPE_LONGINT.
if flags >= 8:
type = Arg_type.TYPE_LONGINT
elif flags & 2:
type = Arg_type.TYPE_SCHAR
elif flags & 1:
type = Arg_type.TYPE_SHORT
else:
type = Arg_type.TYPE_INT
return c, type
def conv_unsigned(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_ULONGLONGINT
else:
# If 'unsigned long long' exists and is the same as 'unsigned long', we
# parse "llu" into TYPE_ULONGINT.
if flags >= 8:
type = Arg_type.TYPE_ULONGINT
elif flags & 2:
type = Arg_type.TYPE_UCHAR
elif flags & 1:
type = Arg_type.TYPE_USHORT
else:
type = Arg_type.TYPE_UINT
return c, type
def conv_float(c, flags):
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_LONGDOUBLE
else:
return c, Arg_type.TYPE_DOUBLE
def conv_char(c, flags):
if flags >= 8:
return c, Arg_type.TYPE_WIDE_CHAR
else:
return c, Arg_type.TYPE_CHAR
def conv_widechar(c, flags):
c = 'c'
return c, Arg_type.TYPE_WIDE_CHAR
def conv_string(c, flags):
if flags >= 8:
return c, Arg_type.TYPE_WIDE_STRING
else:
return c, Arg_type.TYPE_STRING
def conv_widestring(c, flags):
c = 's'
return c, Arg_type.TYPE_WIDE_STRING
def conv_pointer(c, flags):
return c, Arg_type.TYPE_POINTER
def conv_intpointer(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_COUNT_LONGLONGINT_POINTER
else:
# If 'long long' exists and is the same as 'long', we parse "lln" into
# TYPE_COUNT_LONGINT_POINTER.
if flags >= 8:
type = Arg_type.TYPE_COUNT_LONGINT_POINTER
elif flags & 2:
type = Arg_type.TYPE_COUNT_SCHAR_POINTER
elif flags & 1:
type = Arg_type.TYPE_COUNT_SHORT_POINTER
else:
type = Arg_type.TYPE_COUNT_INT_POINTER
return c, type
def conv_none(c, flags):
return c, Arg_type.TYPE_NONE
_conv_char = {
'd': conv_signed,
'i': conv_signed,
'o': conv_unsigned,
'u': conv_unsigned,
'x': conv_unsigned,
'X': conv_unsigned,
'f': conv_float,
'F': conv_float,
'e': conv_float,
'E': conv_float,
'g': conv_float,
'G': conv_float,
'a': conv_float,
'A': conv_float,
'c': conv_char,
'C': conv_widechar,
's': conv_string,
'S': conv_widestring,
'p': conv_pointer,
'n': conv_intpointer,
'%': conv_none
}
def printf_parse(fmt):
'''Parses the format string. Fills in the number N of directives, and fills
in directives[0], ..., directives[N-1], and sets directives[N].dir_start to
the end of the format string. Also fills in the arg_type fields of the
arguments and the needed count of arguments.'''
cp = 0 # index into format string
arg_posn = 0 # number of regular arguments consumed
max_width_length = 0
max_precision_length = 0
d = Directives()
a = Arguments()
while True:
try:
c = fmt[cp]
except IndexError:
break
cp += 1
if c == '%':
arg_index = ARG_NONE
d.dir.append(Directive())
dp = d.dir[d.count]
dp.dir_start = cp - 1
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
arg_index = n - 1
cp = np + 1
# Read the flags.
while True:
if fmt[cp] == '\'':
dp.flags |= FLAG_GROUP
cp += 1
elif fmt[cp] == '-':
dp.flags |= FLAG_LEFT
cp += 1
elif fmt[cp] == '+':
dp.flags |= FLAG_SHOWSIGN
cp += 1
elif fmt[cp] == ' ':
dp.flags |= FLAG_SPACE
cp += 1
elif fmt[cp] == '#':
dp.flags |= FLAG_ALT
cp += 1
elif fmt[cp] == '0':
dp.flags |= FLAG_ZERO
cp += 1
else:
break
# Parse the field width.
if fmt[cp] == '*':
dp.width_start = cp
cp += 1
dp.width_end = cp
if max_width_length < 1:
max_width_length = 1
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
dp.width_arg_index = n - 1
cp = np + 1
if dp.width_arg_index == ARG_NONE:
dp.width_arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.width_arg_index, Arg_type.TYPE_INT)
elif fmt[cp].isdigit():
dp.width_start = cp
while fmt[cp].isdigit():
cp += 1
dp.width_end = cp
width_length = dp.width_end - dp.width_start
if max_width_length < width_length:
max_width_length = width_length
# Parse the precision.
if fmt[cp] == '.':
cp += 1
if fmt[cp] == '*':
dp.precision_start = cp - 1
cp += 1
dp.precision_end = cp
if max_precision_length < 2:
max_precision_length = 2
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
dp.precision_arg_index = n - 1
cp = np + 1
if dp.precision_arg_index == ARG_NONE:
dp.precision_arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.precision_arg_index, Arg_type.TYPE_INT)
else:
dp.precision_start = cp - 1
while fmt[cp].isdigit():
cp += 1
dp.precision_end = cp
precision_length = dp.precision_end - dp.precision_start
if max_precision_length < precision_length:
max_precision_length = precision_length
# Parse argument type/size specifiers.
flags = 0
while True:
if fmt[cp] == 'h':
flags |= (1 << (flags & 1))
cp += 1
elif fmt[cp] == 'L':
flags |= 4
cp += 1
elif fmt[cp] == 'l':
flags += 8
cp += 1
elif fmt[cp] == 'j':
raise ValueError("don't know how to handle intmax_t")
elif fmt[cp] == 'z':
if sizeof(ctypes.c_size_t) > sizeof(ctypes.c_long):
# size_t = long long
flags += 16
elif sizeof(ctypes.c_size_t) > sizeof(ctypes.c_int):
# size_t = long
flags += 8
cp += 1
elif fmt[cp] == 't':
raise ValueError("don't know how to handle ptrdiff_t")
else:
break
# Read the conversion character.
c = fmt[cp]
cp += 1
try:
c, type = _conv_char[c](c, flags)
except KeyError:
raise ValueError('bad conversion character: %%%s' % c)
if type != Arg_type.TYPE_NONE:
dp.arg_index = arg_index
if dp.arg_index == ARG_NONE:
dp.arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.arg_index, type)
dp.conversion = c
dp.dir_end = cp
d.count += 1
d.dir.append(Directive())
d.dir[d.count].dir_start = cp
d.max_width_length = max_width_length
d.max_precision_length = max_precision_length
return d, a
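# Illustrative usage sketch (added commentary, not part of the original module);
# the format string is an assumed example:
#
#   d, a = printf_parse('%-10s costs %08.2f\n')
#   # d.count == 2 directives ('%-10s' and '%08.2f'); a holds two argument
#   # slots typed TYPE_STRING and TYPE_DOUBLE; d.max_width_length reflects the
#   # widest explicit width seen ('10') and d.max_precision_length the '.2'.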
| mit | -4,507,147,994,739,678,700 | 28.974596 | 80 | 0.47677 | false | 3.930648 | false | false | false |
ktarrant/options_csv | journal/trades/migrations/0001_initial.py | 1 | 1392 | # Generated by Django 2.1 on 2018-08-28 05:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Leg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=16)),
('exec_date', models.DateTimeField(verbose_name='date executed')),
('buy_or_sell', models.CharField(choices=[('buy', 'Buy'), ('sell', 'Sell')], default='buy', max_length=4)),
('open_or_close', models.CharField(choices=[('open', 'Open'), ('close', 'Close')], default='open', max_length=5)),
('instrument', models.CharField(choices=[('call', 'Call'), ('put', 'Put'), ('stock', 'Stock'), ('fut', 'Futures')], default='call', max_length=5)),
('quantity', models.PositiveIntegerField()),
('execution_price', models.FloatField()),
('execution_fees', models.FloatField(default=0)),
('expiration_date', models.DateTimeField(null=True, verbose_name='expiration')),
('margin', models.FloatField(null=True)),
('underlying_price', models.FloatField(null=True)),
],
),
]
| mit | 1,929,704,377,205,315,600 | 43.903226 | 163 | 0.558908 | false | 4.256881 | false | false | false |
rschnapka/partner-contact | base_partner_merge/validate_email.py | 1 | 4573 | # -*- coding: utf-8 -*-
# RFC 2822 - style email validation for Python
# (c) 2012 Syrus Akbary <[email protected]>
# Extended from (c) 2011 Noel Bush <[email protected]>
# for support of mx and user check
# This code is made available to you under the GNU LGPL v3.
#
# This module provides a single method, valid_email_address(),
# which returns True or False to indicate whether a given address
# is valid according to the 'addr-spec' part of the specification
# given in RFC 2822. Ideally, we would like to find this
# in some other library, already thoroughly tested and well-
# maintained. The standard Python library email.utils
# contains a parseaddr() function, but it is not sufficient
# to detect many malformed addresses.
#
# This implementation aims to be faithful to the RFC, with the
# exception of a circular definition (see comments below), and
# with the omission of the pattern components marked as "obsolete".
import re
import smtplib
try:
import DNS
ServerError = DNS.ServerError
except (ImportError, AttributeError):  # pyDNS is an optional dependency
DNS = None
class ServerError(Exception):
pass
# All we are really doing is comparing the input string to one
# gigantic regular expression. But building that regexp, and
# ensuring its correctness, is made much easier by assembling it
# from the "tokens" defined by the RFC. Each of these tokens is
# tested in the accompanying unit test file.
#
# The section of RFC 2822 from which each pattern component is
# derived is given in an accompanying comment.
#
# (To make things simple, every string below is given as 'raw',
# even when it's not strictly necessary. This way we don't forget
# when it is necessary.)
#
WSP = r'[ \t]'
CRLF = r'(?:\r\n)'
NO_WS_CTL = r'\x01-\x08\x0b\x0c\x0f-\x1f\x7f'
QUOTED_PAIR = r'(?:\\.)'
FWS = r'(?:(?:{0}*{1})?{0}+)'.format(WSP, CRLF)
CTEXT = r'[{0}\x21-\x27\x2a-\x5b\x5d-\x7e]'.format(NO_WS_CTL)
CCONTENT = r'(?:{0}|{1})'.format(CTEXT, QUOTED_PAIR)
COMMENT = r'\((?:{0}?{1})*{0}?\)'.format(FWS, CCONTENT)
CFWS = r'(?:{0}?{1})*(?:{0}?{1}|{0})'.format(FWS, COMMENT)
ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]'
ATOM = r'{0}?{1}+{0}?'.format(CFWS, ATEXT)
DOT_ATOM_TEXT = r'{0}+(?:\.{0}+)*'.format(ATEXT)
DOT_ATOM = r'{0}?{1}{0}?'.format(CFWS, DOT_ATOM_TEXT)
QTEXT = r'[{0}\x21\x23-\x5b\x5d-\x7e]'.format(NO_WS_CTL)
QCONTENT = r'(?:{0}|{1})'.format(QTEXT, QUOTED_PAIR)
QUOTED_STRING = r'{0}?"(?:{1}?{2})*{1}?"{0}?'.format(CFWS, FWS, QCONTENT)
LOCAL_PART = r'(?:{0}|{1})'.format(DOT_ATOM, QUOTED_STRING)
DTEXT = r'[{0}\x21-\x5a\x5e-\x7e]'.format(NO_WS_CTL)
DCONTENT = r'(?:{0}|{1})'.format(DTEXT, QUOTED_PAIR)
DOMAIN_LITERAL = r'{0}?\[(?:{1}?{2})*{1}?\]{0}?'.format(CFWS, FWS, DCONTENT)
DOMAIN = r'(?:{0}|{1})'.format(DOT_ATOM, DOMAIN_LITERAL)
ADDR_SPEC = r'{0}@{1}'.format(LOCAL_PART, DOMAIN)
VALID_ADDRESS_REGEXP = '^' + ADDR_SPEC + '$'
def validate_email(email, check_mx=False, verify=False):
"""Indicate whether the given string is a valid email address
according to the 'addr-spec' portion of RFC 2822 (see section
3.4.1). Parts of the spec that are marked obsolete are *not*
included in this test, and certain arcane constructions that
depend on circular definitions in the spec may not pass, but in
general this should correctly identify any email address likely
to be in use as of 2011."""
try:
assert re.match(VALID_ADDRESS_REGEXP, email) is not None
check_mx |= verify
if check_mx:
if not DNS:
raise Exception('For check the mx records or check if the '
'email exists you must have installed pyDNS '
'python package')
DNS.DiscoverNameServers()
hostname = email[email.find('@') + 1:]
mx_hosts = DNS.mxlookup(hostname)
for mx in mx_hosts:
try:
smtp = smtplib.SMTP()
smtp.connect(mx[1])
if not verify:
return True
status, _ = smtp.helo()
if status != 250:
continue
smtp.mail('')
status, _ = smtp.rcpt(email)
if status != 250:
return False
break
# Server not permits verify user
except smtplib.SMTPServerDisconnected:
break
except smtplib.SMTPConnectError:
continue
except (AssertionError, ServerError):
return False
return True
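# Illustrative usage sketch (added commentary, not part of the original module);
# the addresses below are made up:
#
#   validate_email('[email protected]')              # syntax check only
#   validate_email('not-an-address')                 # -> False
#   validate_email('[email protected]', check_mx=True)  # also needs pyDNS installed
#   validate_email('[email protected]', verify=True)    # asks the MX host over SMTP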
| agpl-3.0 | 1,122,090,450,398,727,800 | 40.572727 | 77 | 0.605292 | false | 3.345282 | false | false | false |
eblume/tf-idf | tfidf/preprocess.py | 1 | 9539 | #!/usr/bin/env python3
"""Pre processing step for text.
Example:
pp = Preprocesses()
"""
from __future__ import absolute_import, with_statement
import re
from collections import namedtuple
from cachetools import LRUCache, cached # python2 support
from nltk.stem import SnowballStemmer
from six.moves.html_parser import HTMLParser # python2 support
from stop_words import get_stop_words
from .dockeyword import Keyword
unescape = HTMLParser().unescape
def handle_unicode(text):
"""Needed for the description fields."""
if re.search(r'\\+((u([0-9]|[a-z]|[A-Z]){4}))', text):
text = text.encode('utf-8').decode('unicode-escape')
text = re.sub(r'\\n', '\n', text)
text = re.sub(r'\\t', '\t', text)
return text
def handle_html_unquote(text):
"""Detect if there are HTML encoded characters, then decode them."""
if re.search(r'(&#?x?)([A-Z]|[a-z]|[0-9]){2,10};', text):
text = unescape(text)
return text
def handle_mac_quotes(text):
"""Handle the unfortunate non-ascii quotes OSX inserts."""
text = text.replace('“', '"').replace('”', '"')\
.replace('‘', "'").replace('’', "'")
return text
def handle_text_break_dash(text):
"""Convert text break dashes into semicolons to simplify things.
Example:
"She loved icecream- mint chip especially"
"She loved icecream - mint chip especially"
both convert to
"She loved icecream; mint chip especially"
However,
"The 27-year-old could eat icecream any day"
will not be changed.
"""
return re.sub(r'\s+-\s*|\s*-\s+', ';', text)
def clean_text(raw_text):
"""Strip text of non useful characters."""
# Must strip HTML tags out first!
text = re.sub('<[^<]+?>', '', raw_text)
text = handle_unicode(text)
text = handle_html_unquote(text)
text = handle_mac_quotes(text)
text = handle_text_break_dash(text)
text = text.lower()
    regex_subs = [r'[\t\n\r]', r'\s+', '&']
for regex_sub in regex_subs:
text = re.sub(regex_sub, ' ', text)
return text
class Preprocessor(object):
"""Prep the text for TF-IDF calculations.
Fixes some unicode problems, handles HTML character encoding,
and removes HTML tags.
Strips some non alphanumeric characters, but keeps ngram boundary
markers (eg, period (',') and semi-colon (';'))
If a stopwords file is provided, it will remove stopwords.
Example:
>>> processor = Preprocessor('english_stopwords.txt')
>>> processor.clean('He was an interesting fellow.')
"was interesting fellow."
"""
stopwords = set()
contractions = r"(n't|'s|'re)$"
negative_gram_breaks = r'[^:;!^,\?\.\[|\]\(|\)"`]+'
supported_languages = (
'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian',
'italian', 'kazakh', 'norwegian', 'porter', 'portuguese', 'romanian',
'russian', 'spanish', 'swedish', 'turkish'
)
def __init__(self, language=None, gramsize=1, all_ngrams=True,
stopwords_file=None, stemmer=None):
"""Preprocessor must be initalized for use if using stopwords.
stopwords_file (filename): contains stopwords, one per line
stemmer (function): takes in a word and returns the stemmed version
gramsize (int): maximum word size for ngrams
all_ngrams (bool):
if true, all possible ngrams of length "gramsize" and smaller will
be examined. If false, only ngrams of _exactly_ length "gramsize"
will be run.
negative_gram_breaks (regex):
if a word ends with one of these characters, an
ngram may not cross that. Expressed as a _negative_ regex.
Example:
in the sentence "Although he saw the car, he ran across the street"
"car he" may not be a bi-gram
stopwords_file (filename):
Provide a list of stopwords. If used in addition to "language", the
provided stopwords file overrides the default.
stemmer (function):
A function that takes in a single argument (str) and returns a string
as the stemmed word. Overrides the default behavior if specified.
Default None:
Use the NLTK snowball stemmer for the sepcified language. If
language is not found, no stemming will take place.
"""
if language:
assert language in self.supported_languages
if language in SnowballStemmer.languages:
sb_stemmer = SnowballStemmer(language)
self.__stemmer = sb_stemmer.stem
else:
self.__stemmer = lambda x: x # no change to word
self.stopwords = get_stop_words(language)
if stopwords_file:
self._load_stopwords(stopwords_file)
if stemmer:
self.__stemmer = stemmer
self.__gramsize = gramsize
self.__all_ngrams = all_ngrams
@property
def gramsize(self):
"""Number of words in the ngram."""
return self.__gramsize
@property
def all_ngrams(self):
"""True if ngrams of size "gramsize" or smaller will be generated.
False if only ngrams of _exactly_ size "gramsize" are generated.
"""
return self.__all_ngrams
def _load_stopwords(self, filename):
with open(filename) as f:
words = []
for line in f:
words.append(line.strip())
self.stopwords = set(words)
def handle_stopwords(self, text):
"""Remove stop words from the text."""
out = []
for word in text.split(' '):
# Remove common contractions for stopwords when checking list
check_me = re.sub(self.contractions, '', word)
if check_me in self.stopwords:
continue
out.append(word)
return ' '.join(out)
def normalize_term(self, text):
"""Clean first cleans the text characters, then removes the stopwords.
Assumes the input is already the number of words you want for the ngram.
"""
text = clean_text(text)
text = self.handle_stopwords(text)
return self.stem_term(text)
@cached(LRUCache(maxsize=10000))
def _stem(self, word):
"""The stem cache is used to cache up to 10,000 stemmed words.
This substantially speeds up the word stemming on larger documents.
"""
return self.__stemmer(word)
def stem_term(self, term):
"""Apply the standard word procesing (eg stemming). Returns a stemmed ngram."""
return ' '.join([self._stem(x) for x in term.split(' ')])
def yield_keywords(self, raw_text, document=None):
"""Yield keyword objects as mono, di, tri... *-grams.
Use this as an iterator.
Will not create ngrams across logical sentence breaks.
Example:
s = "Although he saw the car, he ran across the street"
the valid bigrams for the sentences are:
['Although he', 'saw the', 'he saw', 'the car',
'he ran', 'across the', 'ran across', 'the street']
"car he" is not a valid bi-gram
This will also stem words when applicable.
Example:
s = "All the cars were honking their horns."
['all', 'the', 'car', 'were', 'honk', 'their', 'horn']
"""
gramlist = range(1, self.gramsize + 1) if self.all_ngrams else [self.gramsize]
for sentence in positional_splitter(self.negative_gram_breaks, raw_text):
words = [x for x in positional_splitter(r'\S+', sentence.text)]
# Remove all stopwords
words_no_stopwords = []
for w in words:
# Remove common contractions for stopwords when checking list
check_me = re.sub(self.contractions, '', w.text)
if check_me not in self.stopwords:
words_no_stopwords.append(w)
# Make the ngrams
for gramsize in gramlist:
# You need to try as many offsets as chunk size
for offset in range(0, gramsize): # number of words offest
data = words_no_stopwords[offset:]
text_in_chunks = [data[pos:pos + gramsize]
for pos in range(0, len(data), gramsize)
if len(data[pos:pos + gramsize]) == gramsize]
for word_list in text_in_chunks:
word_text = ' '.join([self.stem_term(w.text) for w in word_list])
word_global_start = sentence.start + word_list[0].start
word_global_end = sentence.start + word_list[-1].end
yield Keyword(word_text, document=document,
start=word_global_start, end=word_global_end)
        return  # end the generator; raising StopIteration here breaks under PEP 479 (Python 3.7+)
PositionalWord = namedtuple('PositionalWord', ['text', 'start', 'end'])
def positional_splitter(regex, text):
r"""Yield sentence chunks (as defined by the regex) as well as their location.
NOTE: the regex needs to be an "inverse match"
Example:
To split on whitespace, you match:
r'\S+' <-- "a chain of anything that's NOT whitespace"
"""
for res in re.finditer(regex, text):
yield PositionalWord(res.group(0), res.start(), res.end())
    return  # end the generator; raising StopIteration here breaks under PEP 479 (Python 3.7+)
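# Illustrative usage sketch (added commentary, not part of the original module):
#
#   pp = Preprocessor(language='english', gramsize=2)
#   for kw in pp.yield_keywords('The cars were honking their horns.'):
#       ...  # Keyword objects carrying stemmed uni-/bi-grams plus start/end offsets
#
#   list(positional_splitter(r'\S+', 'a bc'))
#   # -> [PositionalWord(text='a', start=0, end=1), PositionalWord(text='bc', start=2, end=4)]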
| mit | 2,284,034,132,267,696,400 | 35.94186 | 89 | 0.58934 | false | 3.893382 | false | false | false |
industrydive/fileflow | fileflow/operators/dive_python_operator.py | 1 | 1096 | """
.. module:: operators.dive_operator
:synopsis: DivePythonOperator for use with TaskRunner
.. moduleauthor:: Laura Lorenz <[email protected]>
.. moduleauthor:: Miriam Sexton <[email protected]>
"""
from airflow.operators import PythonOperator
from .dive_operator import DiveOperator
class DivePythonOperator(DiveOperator, PythonOperator):
"""
Python operator that can send along data dependencies to its callable.
Generates the callable by initializing its python object and calling its method.
"""
def __init__(self, python_object, python_method="run", *args, **kwargs):
self.python_object = python_object
self.python_method = python_method
kwargs['python_callable'] = None
super(DivePythonOperator, self).__init__(*args, **kwargs)
def pre_execute(self, context):
context.update(self.op_kwargs)
context.update({"data_dependencies": self.data_dependencies})
instantiated_object = self.python_object(context)
self.python_callable = getattr(instantiated_object, self.python_method)
| apache-2.0 | -2,527,563,072,624,418,000 | 34.354839 | 84 | 0.711679 | false | 3.985455 | false | false | false |
awni/tensorflow | tensorflow/contrib/skflow/python/skflow/tests/test_multioutput.py | 1 | 1502 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error
import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow
class MultiOutputTest(tf.test.TestCase):
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
regressor = skflow.TensorFlowLinearRegressor(learning_rate=0.01)
regressor.fit(X, y)
score = mean_squared_error(regressor.predict(X), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -3,187,376,389,461,075,500 | 33.930233 | 78 | 0.709055 | false | 3.645631 | false | false | false |
magcius/sweettooth | sweettooth/extensions/migrations/0008_new_icon_default.py | 1 | 6118 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
new_default = orm.Extension._meta.get_field_by_name('icon')[0].default
for ext in orm.Extension.objects.filter(icon=""):
ext.icon = new_default
ext.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'extensions.extension': {
'Meta': {'object_name': 'Extension'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': "'/static/images/plugin.png'", 'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'screenshot': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'extensions.extensionversion': {
'Meta': {'unique_together': "(('extension', 'version'),)", 'object_name': 'ExtensionVersion'},
'extension': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['extensions.Extension']"}),
'extra_json_fields': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shell_versions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['extensions.ShellVersion']", 'symmetrical': 'False'}),
'source': ('django.db.models.fields.files.FileField', [], {'max_length': '223'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'extensions.shellversion': {
'Meta': {'object_name': 'ShellVersion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major': ('django.db.models.fields.PositiveIntegerField', [], {}),
'minor': ('django.db.models.fields.PositiveIntegerField', [], {}),
'point': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['extensions']
| agpl-3.0 | -7,303,733,742,660,065,000 | 67.741573 | 182 | 0.556064 | false | 3.809465 | false | false | false |
mattboyer/sqbrite | src/record.py | 1 | 5656 | # MIT License
#
# Copyright (c) 2017 Matt Boyer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pdb
from . import _LOGGER
from .field import (Field, MalformedField)
from .utils import (Varint, IndexDict)
class MalformedRecord(Exception):
pass
class Record(object):
column_types = {
0: (0, "NULL"),
1: (1, "8-bit twos-complement integer"),
2: (2, "big-endian 16-bit twos-complement integer"),
3: (3, "big-endian 24-bit twos-complement integer"),
4: (4, "big-endian 32-bit twos-complement integer"),
5: (6, "big-endian 48-bit twos-complement integer"),
6: (8, "big-endian 64-bit twos-complement integer"),
7: (8, "Floating point"),
8: (0, "Integer 0"),
9: (0, "Integer 1"),
}
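    # Added commentary (not in the original source): serial types 12 and above
    # encode variable-length payloads, mirroring the SQLite file format and the
    # arithmetic in _parse() below -- an even N >= 12 is a BLOB of (N - 12) / 2
    # bytes, an odd N >= 13 is TEXT of (N - 13) / 2 bytes (e.g. serial type 25
    # is a 6-byte text field).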
def __init__(self, record_bytes):
self._bytes = record_bytes
self._header_bytes = None
self._fields = IndexDict()
self._parse()
def __bytes__(self):
return self._bytes
@property
def header(self):
return self._header_bytes
@property
def fields(self):
return self._fields
def truncate(self, new_length):
self._bytes = self._bytes[:new_length]
self._parse()
def _parse(self):
header_offset = 0
header_length_varint = Varint(
# A varint is encoded on *at most* 9 bytes
bytes(self)[header_offset:9 + header_offset]
)
# Let's keep track of how many bytes of the Record header (including
# the header length itself) we've succesfully parsed
parsed_header_bytes = len(header_length_varint)
if len(bytes(self)) < int(header_length_varint):
raise MalformedRecord(
"Not enough bytes to fully read the record header!"
)
header_offset += len(header_length_varint)
self._header_bytes = bytes(self)[:int(header_length_varint)]
col_idx = 0
field_offset = int(header_length_varint)
while header_offset < int(header_length_varint):
serial_type_varint = Varint(
bytes(self)[header_offset:9 + header_offset]
)
serial_type = int(serial_type_varint)
col_length = None
try:
col_length, _ = self.column_types[serial_type]
except KeyError:
if serial_type >= 13 and (1 == serial_type % 2):
col_length = (serial_type - 13) // 2
elif serial_type >= 12 and (0 == serial_type % 2):
col_length = (serial_type - 12) // 2
else:
raise ValueError(
"Unknown serial type {}".format(serial_type)
)
try:
field_obj = Field(
col_idx,
serial_type,
bytes(self)[field_offset:field_offset + col_length]
)
except MalformedField as ex:
_LOGGER.warning(
"Caught %r while instantiating field %d (%d)",
ex, col_idx, serial_type
)
raise MalformedRecord
except Exception as ex:
_LOGGER.warning(
"Caught %r while instantiating field %d (%d)",
ex, col_idx, serial_type
)
pdb.set_trace()
raise
self._fields[col_idx] = field_obj
col_idx += 1
field_offset += col_length
parsed_header_bytes += len(serial_type_varint)
header_offset += len(serial_type_varint)
if field_offset > len(bytes(self)):
raise MalformedRecord
# assert(parsed_header_bytes == int(header_length_varint))
def print_fields(self, table=None):
for field_idx in self._fields:
field_obj = self._fields[field_idx]
if not table or table.columns is None:
_LOGGER.info(
"\tField %d (%d bytes), type %d: %s",
field_obj.index,
len(field_obj),
field_obj.serial_type,
field_obj.value
)
else:
_LOGGER.info(
"\t%s: %s",
table.columns[field_obj.index],
field_obj.value
)
def __repr__(self):
return '<Record {} fields, {} bytes, header: {} bytes>'.format(
len(self._fields), len(bytes(self)), len(self.header)
)
| mit | 4,934,288,116,914,393,000 | 33.487805 | 79 | 0.554455 | false | 4.220896 | false | false | false |
flgiordano/netcash | +/google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/tools/appengine_rpc_test_util.py | 1 | 6895 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing code that uses appengine_rpc's *RpcServer."""
import logging
import StringIO
import urllib2
from googlecloudsdk.third_party.appengine.tools.appengine_rpc import AbstractRpcServer
from googlecloudsdk.third_party.appengine.tools.appengine_rpc import HttpRpcServer
class TestRpcServerMixin(object):
"""Provides a mocked-out version of HttpRpcServer for testing purposes."""
def set_strict(self, strict=True):
"""Enables strict mode."""
self.opener.set_strict(strict)
def _GetOpener(self):
"""Returns a MockOpener.
Returns:
A MockOpener object.
"""
return TestRpcServerMixin.MockOpener()
class MockResponse(object):
"""A mocked out response object for testing purposes."""
def __init__(self, body, code=200, headers=None):
"""Creates a new MockResponse.
Args:
body: The text of the body to return.
code: The response code (default 200).
headers: An optional header dictionary.
"""
self.fp = StringIO.StringIO(body)
self.code = code
self.headers = headers
self.msg = ""
if self.headers is None:
self.headers = {}
def info(self):
return self.headers
def read(self, length=-1):
"""Reads from the response body.
Args:
length: The number of bytes to read.
Returns:
The body of the response.
"""
return self.fp.read(length)
def readline(self):
"""Reads a line from the response body.
Returns:
A line of text from the response body.
"""
return self.fp.readline()
def close(self):
"""Closes the response stream."""
self.fp.close()
class MockOpener(object):
"""A mocked-out OpenerDirector for testing purposes."""
def __init__(self):
"""Creates a new MockOpener."""
self.requests = []
self.responses = {}
self.ordered_responses = {}
self.cookie = None
self.strict = False
def set_strict(self, strict=True):
"""Enables strict mode."""
self.strict = strict
def open(self, request):
"""Logs the request and returns a MockResponse object."""
full_url = request.get_full_url()
if "?" in full_url:
url = full_url[:full_url.find("?")]
else:
url = full_url
if (url != "https://www.google.com/accounts/ClientLogin"
and not url.endswith("_ah/login")):
assert "X-appcfg-api-version" in request.headers
assert "User-agent" in request.headers
request_data = (full_url, bool(request.data))
self.requests.append(request_data)
if self.cookie:
request.headers["Cookie"] = self.cookie
# Use ordered responses in preference to specific response to generic 200.
if url in self.ordered_responses:
logging.debug("Using ordered pre-canned response for: %s" % full_url)
response = self.ordered_responses[url].pop(0)(request)
if not self.ordered_responses[url]:
self.ordered_responses.pop(url)
elif url in self.responses:
logging.debug("Using pre-canned response for: %s" % full_url)
response = self.responses[url](request)
elif self.strict:
raise Exception('No response found for url: %s (%s)' % (url, full_url))
else:
logging.debug("Using generic blank response for: %s" % full_url)
response = TestRpcServerMixin.MockResponse("")
if "Set-Cookie" in response.headers:
self.cookie = response.headers["Set-Cookie"]
# Handle error status codes in the same way as the appengine_rpc openers.
# urllib2 will raise HTTPError for non-2XX status codes, per RFC 2616.
if not (200 <= response.code < 300):
code, msg, hdrs = response.code, response.msg, response.info()
fp = StringIO.StringIO(response.read())
raise urllib2.HTTPError(url=url, code=code, msg=None, hdrs=hdrs, fp=fp)
return response
def AddResponse(self, url, response_func):
"""Calls the provided function when the provided URL is requested.
The provided function should accept a request object and return a
response object.
Args:
url: The URL to trigger on.
response_func: The function to call when the url is requested.
"""
self.responses[url] = response_func
def AddOrderedResponse(self, url, response_func):
"""Calls the provided function when the provided URL is requested.
The provided functions should accept a request object and return a
response object. This response will be added after previously given
responses if they exist.
Args:
url: The URL to trigger on.
response_func: The function to call when the url is requested.
"""
if url not in self.ordered_responses:
self.ordered_responses[url] = []
self.ordered_responses[url].append(response_func)
def AddOrderedResponses(self, url, response_funcs):
"""Calls the provided function when the provided URL is requested.
The provided functions should accept a request object and return a
response object. Each response will be called once.
Args:
url: The URL to trigger on.
response_funcs: A list of response functions.
"""
self.ordered_responses[url] = response_funcs
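# Illustrative usage sketch (added commentary, not part of the original module);
# the URL and payload below are made up:
#
#   server = TestHttpRpcServer(...)   # constructed like any HttpRpcServer (see below)
#   server.opener.AddResponse(
#       "https://example.com/api/endpoint",
#       lambda request: TestRpcServerMixin.MockResponse("ok", code=200))
#   # Requests sent to that URL now receive the canned response, and
#   # server.opener.requests records every (full_url, bool(data)) pair seen.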
class TestRpcServer(TestRpcServerMixin, AbstractRpcServer):
pass
class TestHttpRpcServer(TestRpcServerMixin, HttpRpcServer):
pass
class UrlLibRequestResponseStub(object):
def __init__(self, headers=None):
self.headers = {}
if headers:
self.headers = headers
def add_header(self, header, value):
# Note that this does not preserve header order.
# If that's a problem for your tests, add some functionality :)
self.headers[header] = value
class UrlLibRequestStub(UrlLibRequestResponseStub):
pass
class UrlLibResponseStub(UrlLibRequestResponseStub, StringIO.StringIO):
def __init__(self, body, headers, url, code, msg):
UrlLibRequestResponseStub.__init__(self, headers)
if body:
StringIO.StringIO.__init__(self, body)
else:
StringIO.StringIO.__init__(self, "")
self.url = url
self.code = code
self.msg = msg
| bsd-3-clause | -7,914,726,820,681,663,000 | 30.921296 | 86 | 0.663234 | false | 4.166163 | true | false | false |
soulweaver91/batchpatch | batchpatch.py | 1 | 27396 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Soulweaver'
import argparse
import os
import re
import time
import shutil
import colorama
import subprocess
import unicodedata
import gettext
import zipfile
import zlib
from datetime import datetime
from dateutil import tz
from logger import LogLevel, Logger
class BatchPatch:
PROG_NAME = 'BatchPatch'
PROG_VERSION = '0.3'
PROG_URL = 'https://github.com/soulweaver91/batchpatch'
LOCALE_CATALOG = 'batchpatch'
CRC_BUFFER_SIZE = 65536
logger = None
script_options = {
'script_lang': 'en_US',
'script_name': 'apply'
}
patch_options = {
'filename_pattern': None
}
archive_options = {
'create_zip': False,
'zip_name': 'patch'
}
log_level = LogLevel.notice
xdelta_location = ''
locale_dir = ''
def __init__(self):
colorama.init()
self.xdelta_location = os.path.join(BatchPatch.get_install_path(), 'xdelta3.exe')
self.locale_dir = os.path.join(BatchPatch.get_install_path(), 'i18n')
def print_welcome(self):
# Print this even on the highest levels, but not on silent, and without the log prefix
if self.log_level != LogLevel.silent:
print('{} version {}'.format(self.PROG_NAME, self.PROG_VERSION))
def get_version(self):
return self.PROG_VERSION
def get_name(self):
return self.PROG_NAME
def switch_languages(self, lang):
try:
gettext.translation('batchpatch', self.locale_dir, languages=[lang, 'en_US']).install()
except OSError as e:
self.logger.log('Selecting language {} failed: {}'.format(lang, e.strerror), LogLevel.error)
def run(self):
parser = argparse.ArgumentParser(
description="Generates distribution ready patches for anime batch releases."
)
parser.add_argument(
'-o', '--old',
action='store',
help='The path to the folder with the old files. Required.',
required=True,
metavar='directory'
)
parser.add_argument(
'-n', '--new',
action='store',
help='The path to the folder with the new files. Required.',
required=True,
metavar='directory'
)
parser.add_argument(
'-t', '--target',
action='store',
help='The path where the output should be written to. If not specified, '
'a new date stamped subfolder will be written under the current '
'working directory.',
default=self.get_default_output_folder(),
metavar='directory'
)
parser.add_argument(
'-l', '--loglevel',
action='store',
help='The desired verbosity level. Any messages with the same or higher '
'level than the chosen one will be displayed. '
'Available values: debug (most verbose), notice, warning, error, silent '
'(least verbose, does not print anything). Default: notice.',
choices=[e.name for e in LogLevel],
default='notice',
metavar='level'
)
parser.add_argument(
'-x', '--xdelta',
action='store',
help='An alternative location for the xdelta3 executable to search instead of '
'the same directory as the script.',
default=self.xdelta_location,
metavar='path'
)
parser.add_argument(
'-z', '--zip',
action='store_true',
help='Create a ZIP file out of the created patch files.'
)
parser.add_argument(
'-c', '--check-crc',
action='store_true',
help='Verify CRC values of source and target files, if present.'
)
parser.add_argument(
'--zip-name',
action='store',
help='The filename to save the ZIP with. Only meaningful if -z was set.',
default='patch.zip',
metavar='path'
)
parser.add_argument(
'--script-lang',
action='store',
help='The language to use in the generated script.',
default='en_US',
choices=[d for d in os.listdir(self.locale_dir) if os.path.isdir(os.path.join(self.locale_dir, d))],
metavar='lang_code'
)
parser.add_argument(
'--script-name',
action='store',
help='The filename to use for the generated script, without the extension. \'apply\' by default.',
default='apply',
metavar='name'
)
parser.add_argument(
'--patch-pattern',
action='store',
help='The filename to use for the patch files. Consult README.md for available variables.',
default='{name}{specifier_items[0]}_{ep}_v{v_old}v{v_new}.vcdiff',
metavar='name'
)
parser.add_argument(
'-v', '--version',
action='version',
version="{} version {}".format(self.PROG_NAME, self.PROG_VERSION)
)
args = parser.parse_args()
self.log_level = LogLevel[args.loglevel]
self.logger = Logger(self.log_level)
self.script_options['script_lang'] = args.script_lang
self.script_options['script_name'] = args.script_name
self.patch_options['filename_pattern'] = args.patch_pattern
self.archive_options['zip_name'] = args.zip_name
if args.xdelta is not None:
self.xdelta_location = args.xdelta
self.logger.log('Custom xdelta location \'{}\' read from the command line.'.format(args.xdelta),
LogLevel.debug)
self.print_welcome()
self.check_prerequisites(args)
file_pairs = self.identify_file_pairs_by_name(args.old, args.new)
if len(file_pairs) > 0:
# Sort in alphabetical order for nicer output all around
file_pairs.sort(key=lambda item: item[0])
if args.check_crc:
errors = self.check_crcs(file_pairs)
if len(errors) > 0:
self.logger.log('One or more CRC values did not match, cannot proceed.', LogLevel.error)
return
self.generate_patches(file_pairs, args.target)
self.generate_win_script(file_pairs, args.target)
self.copy_executable(args.target)
if args.zip:
self.create_archive(file_pairs, args.target)
self.logger.log('Done.', LogLevel.notice)
else:
self.logger.log('No files to generate patches for.', LogLevel.notice)
def check_prerequisites(self, args):
self.logger.log('Checking prerequisites.', LogLevel.debug)
for p in ('old', 'new', 'target'):
self.logger.log('Verifying existence of {} directory.'.format(p), LogLevel.debug)
try:
path = getattr(args, p)
except AttributeError:
self.logger.log('Expected parameter \'{}\' was missing!'.format(p), LogLevel.error)
exit()
if not os.path.isdir(path):
if p != 'target':
self.logger.log('{} is not a valid path!'.format(path), LogLevel.error)
exit()
else:
if os.path.exists(path):
self.logger.log('\'{}\' exists and is not a directory!'.format(path), LogLevel.error)
exit()
else:
self.logger.log('Creating output directory \'{}\'.'.format(path), LogLevel.notice)
try:
os.makedirs(path)
except OSError as e:
self.logger.log('Error while creating directory \'{]\': {}'.format(path, e.strerror),
LogLevel.error)
exit()
else:
self.logger.log('\'{}\' was found.'.format(path), LogLevel.debug)
self.logger.log('Verifying a xdelta executable is found from the specified location.', LogLevel.debug)
if not os.path.exists(self.xdelta_location) or not os.path.isfile(self.xdelta_location):
self.logger.log('The xdelta3 executable could not be found at \'{}\'!'.format(self.xdelta_location),
LogLevel.error)
self.logger.log('Please download correct version for your system from the xdelta site or', LogLevel.error)
self.logger.log('compile it yourself, and then add it to the same directory as this script', LogLevel.error)
self.logger.log('under the name xdelta3.exe.', LogLevel.error)
exit()
if not os.access(self.xdelta_location, os.X_OK):
self.logger.log('The xdelta3 executable at \'{}\' doesn\'t have execution permissions!'.format(
self.xdelta_location), LogLevel.error
)
exit()
self.logger.log('Prerequisites OK.', LogLevel.debug)
def check_crcs(self, file_pairs):
errors = []
for pair in file_pairs:
for file in [pair[5], pair[6]]:
if file["crc"] is None:
continue
self.logger.log('Calculating CRC for {}...'.format(os.path.basename(file["filename"])), LogLevel.notice)
with open(file["filename"], 'rb') as f:
buffer = f.read(self.CRC_BUFFER_SIZE)
intermediate = 0
while len(buffer) > 0:
intermediate = zlib.crc32(buffer, intermediate)
buffer = f.read(self.CRC_BUFFER_SIZE)
crc = format(intermediate & 0xFFFFFFFF, '08x')
self.logger.log('CRC is {}, filename says {}.'.format(crc, file["crc"]), LogLevel.notice)
if crc.lower() != file["crc"].lower():
self.logger.log('CRCs don\'t match!', LogLevel.error)
errors.append(file["filename"])
return errors
def generate_patches(self, file_pairs, target_dir):
self.logger.log('Generating patches for {} file pairs.'.format(str(len(file_pairs))), LogLevel.debug)
for pair in file_pairs:
self.logger.log('Creating patch: {} -> {}'.format(pair[0], pair[1]), LogLevel.notice)
effective_source = pair[0]
effective_target = pair[1]
temp_source_name = None
temp_target_name = None
if not pair[4]:
temp_source_name = '~' + os.path.basename(pair[2]) + '.src'
temp_target_name = '~' + os.path.basename(pair[2]) + '.dst'
self.logger.log(('Filename is not safe for xdelta on Windows. Copying files to temporary '
'names {} and {}.').format(temp_source_name, temp_target_name), LogLevel.notice)
shutil.copyfile(pair[0], temp_source_name)
shutil.copyfile(pair[1], temp_target_name)
effective_source = temp_source_name
effective_target = temp_target_name
cmd = [
self.xdelta_location,
'-e', # Create patch
'-9', # Use maximum compression
'-s', # Read from file
effective_source, # Old file
effective_target, # New file
os.path.join(target_dir, pair[2]) # Patch destination
]
if self.log_level.numval <= LogLevel.notice.numval:
# Pass verbose flag to xdelta if using a relatively verbose logging level
cmd.insert(2, '-v')
elif self.log_level.numval == LogLevel.silent.numval:
# Pass quiet flag if using the silent logging level
cmd.insert(2, '-q')
try:
self.logger.log('Starting subprocess, command line: {}'.format(" ".join(cmd)), LogLevel.debug)
ret = subprocess.call(cmd)
if ret != 0:
self.logger.log('xdelta returned a non-zero return value {}! '
'This probably means something went wrong.'.format(str(ret)), LogLevel.warning)
if not pair[4]:
self.logger.log('Removing temporary files.'.format(temp_source_name, temp_target_name),
LogLevel.notice)
os.unlink(temp_source_name)
os.unlink(temp_target_name)
except (OSError, IOError) as e:
self.logger.log('Starting the subprocess failed! ' + e.strerror, LogLevel.warning)
def generate_win_script(self, file_pairs, target_dir):
self.switch_languages(self.script_options['script_lang'])
fh = open(os.path.join(target_dir, self.script_options['script_name'] + '.cmd'),
mode='w', newline='\r\n', encoding='utf-8')
self.logger.log('Generating Windows update script.'.format(str(len(file_pairs))), LogLevel.debug)
fh.write('@echo off\n\n')
fh.write('REM Generated by {} version {}\n'.format(self.PROG_NAME, self.PROG_VERSION))
fh.write('REM on {}\n'.format(datetime.now(tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S %z (%Z)")))
fh.write('REM {}\n\n'.format(self.PROG_URL))
fh.write('setlocal\n')
fh.write('for /f "tokens=2 delims=:." %%x in (\'chcp\') do set cp=%%x\n')
fh.write('chcp 65001 > NUL\n')
fh.write('set pnum=0\n')
fh.write('set nnum=0\n')
fh.write('set fnum=0\n\n')
fh.write('IF NOT EXIST "{}" (\n'.format(os.path.basename(self.xdelta_location)))
fh.write(' echo {msg}\n'.format(
msg=_('The xdelta executable was not found! It is required for this script to work!'))
)
fh.write(' pause\n')
fh.write(' exit /b 1\n')
fh.write(')\n\n')
for pair in file_pairs:
if pair[4]:
fh.write(
(
'IF EXIST "{old}" (\n' +
' IF NOT EXIST "{new}" (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc}...')) +
' set /a pnum+=1\n' +
' "{xdelta}" -d -v -s "{old}" "{patch}" "{new}" || (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc} failed!')) +
' set /a pnum-=1\n' +
' set /a fnum+=1\n' +
' )\n' +
' ) ELSE (\n' +
' echo {msg}\n'.format(msg=_('{new_esc} already exists, skipping...')) +
' set /a nnum+=1\n' +
' )\n' +
') ELSE (\n' +
' echo {msg}\n'.format(msg=_('{old_esc} not present in folder, skipping...')) +
' set /a nnum+=1\n' +
')\n'
).format(
old=os.path.basename(pair[0]),
new=os.path.basename(pair[1]),
patch=os.path.basename(pair[2]),
old_esc=self.cmd_escape(os.path.basename(pair[0])),
new_esc=self.cmd_escape(os.path.basename(pair[1])),
xdelta=os.path.basename(self.xdelta_location)
)
)
else:
fh.write(
(
'IF EXIST "{old}" (\n' +
' IF NOT EXIST "{new}" (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc}...')) +
' set /a pnum+=1\n' +
' REM xdelta unicode incompatibility workaround\n' +
' copy "{old}" "{intermediate_old}" > NUL\n' +
' "{xdelta}" -d -v -s "{intermediate_old}" "{patch}" "{intermediate_new}" || (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc} failed!')) +
' set /a pnum-=1\n' +
' set /a fnum+=1\n' +
' )\n' +
' REM xdelta unicode incompatibility workaround\n' +
' move "{intermediate_new}" "{new}" > NUL\n' +
' del "{intermediate_old}" > NUL\n' +
' ) ELSE (\n' +
' echo {msg}\n'.format(msg=_('{new_esc} already exists, skipping...')) +
' set /a nnum+=1\n' +
' )\n' +
') ELSE (\n' +
' echo {msg}\n'.format(msg=_('{old_esc} not present in folder, skipping...')) +
' set /a nnum+=1\n' +
')\n'
).format(
old=os.path.basename(pair[0]),
new=os.path.basename(pair[1]),
intermediate_old=('~' + os.path.basename(pair[2]) + '.src'),
intermediate_new=('~' + os.path.basename(pair[2]) + '.dst'),
patch=os.path.basename(pair[2]),
old_esc=self.cmd_escape(os.path.basename(pair[0])),
new_esc=self.cmd_escape(os.path.basename(pair[1])),
xdelta=os.path.basename(self.xdelta_location)
)
)
fh.write('echo {msg}\n'.format(msg=_('Finished, with %pnum% files patched, %nnum% skipped and %fnum% failed.')))
fh.write('pause\n')
fh.write('chcp %cp% > NUL\n')
fh.close()
self.switch_languages('en_US')
def copy_executable(self, target_dir):
self.logger.log('Copying xdelta to the target folder {}.'.format(target_dir), LogLevel.debug)
shutil.copy(os.path.join(os.getcwd(), self.xdelta_location),
os.path.join(target_dir, os.path.basename(self.xdelta_location)))
def create_archive(self, file_pairs, target_dir):
zip_path = os.path.join(target_dir, self.archive_options['zip_name'])
self.logger.log('Creating a ZIP archive of the patch to \'{}\'.'.format(zip_path), LogLevel.debug)
zipped = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for pair in file_pairs:
self.logger.log('Writing: {}...'.format(pair[2]), LogLevel.debug)
zipped.write(os.path.join(target_dir, pair[2]), pair[2])
self.logger.log('Writing the patch script...', LogLevel.debug)
zipped.write(os.path.join(target_dir, self.script_options['script_name'] + '.cmd'),
self.script_options['script_name'] + '.cmd')
self.logger.log('Writing the executable...', LogLevel.debug)
zipped.write(os.path.join(target_dir, os.path.basename(self.xdelta_location)),
os.path.basename(self.xdelta_location))
zipped.close()
def identify_file_pairs_by_name(self, old_dir, new_dir):
self.logger.log('Identifying potential file pairs for patching.', LogLevel.debug)
old_files = os.listdir(str(old_dir))
new_files = os.listdir(str(new_dir))
filemap = {}
for file in [self.create_file_entity(f, old_dir) for f in old_files]:
if file is not None:
self.logger.log('Found potential source file: {}'.format(file['filename']), LogLevel.debug)
self.logger.log(' Group {}, series {}, type {} {}, episode {}, version {}'.format(
file['group'],
file['name'],
file['specifier'],
file['ext'],
file['ep'],
file['ver']
), LogLevel.debug)
key = file.get('key')
if key in filemap:
filemap[key][0].append(file)
else:
filemap[key] = ([file], [])
for file in [self.create_file_entity(f, new_dir) for f in new_files]:
if file is not None:
key = file.get('key')
if key in filemap:
self.logger.log('Found potential target file: {}'.format(file['filename']), LogLevel.debug)
self.logger.log(' Group {}, series {}, type {} {}, episode {}, version {}'.format(
file['group'],
file['name'],
file['specifier'],
file['ext'],
file['ep'],
file['ver']
), LogLevel.debug)
filemap[key][1].append(file)
else:
# There were no matching files in the old directory, so this won't be a candidate for patching.
self.logger.log('Ignoring target file with no equivalent source: {}'.format(file['filename']),
LogLevel.debug)
# Let's prune those source files that were found that have no target equivalents.
item_cnt = len(filemap)
filemap = {k: v for (k, v) in filemap.items() if len(v[1]) >= 1}
if len(filemap) < item_cnt:
diff = item_cnt - len(filemap)
self.logger.log('Dropped {} source candidate{} with no equivalent targets.'.format(
str(diff), '' if diff == 1 else 's'), LogLevel.debug)
resolved_relations = []
for key, group in filemap.items():
highest_source = max(group[0], key=lambda x: x['ver'])
highest_target = max(group[1], key=lambda x: x['ver'])
if highest_source['ver'] == highest_target['ver']:
self.logger.log('Source and target versions of {} are both {}, ignoring the group.'.format(
key, highest_target['ver']
), LogLevel.debug)
continue
patch_name = self.get_patch_name(highest_source, highest_target)
# TODO: refactor, these are too complex and confusing to be tuples anymore
resolved_relations.append((highest_source['filename'], highest_target['filename'], patch_name,
highest_target['key'],
self.is_name_windows_safe(os.path.basename(highest_source['filename'])) and
self.is_name_windows_safe(os.path.basename(highest_target['filename'])),
highest_source,
highest_target))
self.logger.log('Queued: {} -> {}, patch name: {}'.format(
highest_source['filename'], highest_target['filename'], patch_name
), LogLevel.debug)
return resolved_relations
@staticmethod
def cmd_escape(s):
return re.sub(r'([\[\]\(\)^<>|])', r'^\1', s)
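    # Added commentary (not in the original source): cmd_escape prefixes cmd.exe
    # metacharacters with '^', e.g. cmd_escape('Show (TV).mkv') -> 'Show ^(TV^).mkv'.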
def get_patch_name(self, source, target):
try:
return self.patch_options['filename_pattern'].format(
raw_group=source['group'],
raw_name=source['name'],
raw_ep=source['ep'],
raw_specifier=source['specifier'],
raw_ext=source['ext'],
group=BatchPatch.neutralize_str(source['group']),
name=BatchPatch.neutralize_str(source['name']),
ep=BatchPatch.neutralize_str(source['ep']),
specifier=BatchPatch.neutralize_str(source['specifier']),
specifier_items=[BatchPatch.neutralize_str(s) for s in (
source['specifier'].split() if len(source['specifier']) > 0 else ['']
)],
type=BatchPatch.neutralize_str(source['specifier'] + source['ext']),
ext=BatchPatch.neutralize_str(source['ext']),
v_old=source['ver'],
v_new=target['ver'],
hash_old=source['crc'],
hash_new=target['crc']
)
except KeyError as e:
self.logger.log('Invalid variable {} in patch name pattern!'.format(e.args[0]), LogLevel.error)
exit()
@staticmethod
def create_file_entity(filename, basedir):
matcher = re.compile('(?#1. Group shortname)(?:\[([^\]]+?)\] )?'
'(?#2. Main name)(.+?)'
'(?#3. Episode specifier)(?: - ([a-zA-Z]*\d*))?'
'(?#4. Version specifier)(?:v(\d*))?'
'(?#5. Other specifiers)(?: \(([^\)]*)\))?'
'(?#6. CRC hash)(?: \[([0-9a-fA-F]{8})\])?'
'(?# Eat all extension-looking parts except the last one)(?:\..+)?'
'\.'
'(?# Do not match torrents)(?!torrent$)'
'(?#7. Get the file extension)([^\.]+)$')
match = matcher.match(filename)
if match:
path = os.path.join(basedir, match.group(0))
ver = match.group(4)
if ver is None:
ver = 1
specifier = match.group(5)
if specifier is None:
specifier = ''
return {
"key": "/".join([match.group(x) for x in [1, 2, 3, 5, 7] if isinstance(match.group(x), str)]),
"ver": int(ver),
"group": match.group(1),
"name": match.group(2),
"ep": match.group(3),
"specifier": specifier,
"crc": match.group(6),
"ext": match.group(7),
"filename": path
}
else:
return None
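    # Illustrative sketch (added commentary, not in the original code): for an
    # assumed release name such as
    #   "[Group] Show Name - 05v2 (720p) [ABCD1234].mkv"
    # create_file_entity() would return group='Group', name='Show Name', ep='05',
    # ver=2, specifier='720p', crc='ABCD1234', ext='mkv'; the pairing key used to
    # match old and new files is built from the group, name, episode, specifier
    # and extension.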
@staticmethod
def get_default_output_folder():
return os.path.join(os.getcwd(), 'batch-' + time.strftime('%Y-%m-%d-%H-%M'))
@staticmethod
def neutralize_str(name):
s = unicodedata.normalize('NFKD', name)
s = u"".join([c for c in s if not unicodedata.combining(c)])
return re.sub(r'[^a-z0-9_-]', '_', s.casefold())
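    # Added commentary (not in the original source): neutralize_str() strips
    # accents via NFKD, casefolds, and replaces anything outside [a-z0-9_-],
    # e.g. neutralize_str('Épisode Spécial!') -> 'episode_special_'.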
unsafe_windows_filenames = [
'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'
]
@staticmethod
def is_name_windows_safe(name):
""" Verifies if the filename can be passed to xdelta on Windows. The user's codepage can be whatever,
so only accept a subset of the 7-bit ASCII as safe values.
"""
return name not in BatchPatch.unsafe_windows_filenames and \
name == re.sub(r'[^ !#$%&()+,\-.0-9;=@A-Z\[\]^_`a-z{}]', r'', name)
@staticmethod
def get_install_path():
return os.path.dirname(os.path.realpath(__file__))
if __name__ == "__main__":
gettext.install('batchpatch', os.path.join(BatchPatch.get_install_path(), 'i18n'))
prog = BatchPatch()
prog.run()
| mit | -1,857,567,898,504,627,200 | 42.974318 | 120 | 0.501643 | false | 4.09997 | false | false | false |
dionisos2/CI | src/centres_of_interest_manager.py | 1 | 14888 | """
See CentresOfInterestManager class
"""
import re
import mylib.checking as checking
from mylib.string_op import replace_special_char
from mylib.notifier import Notifier
from xml.dom import minidom
from lxml import etree
from centre_of_interest import CentreOfInterest
def identity(x):
return x
class CentresOfInterestManager:
"""
    Class that permits creating/loading lists of CIs (centres of interest),
    and exporting them in different formats.
"""
def __init__(self, list_of_ci=None, notifier=None):
assert not(list_of_ci) or\
checking.is_all_instance(list_of_ci, CentreOfInterest)
self.ci_dtd = "ci.dtd"
self.ci_graph_dtd = "ci_graph.dtd"
# Templates html tags
self.html_start_list = "<ul>\n"
self.html_end_list = "</ul>\n"
self.html_date = "<h2>{date}</h2>\n"
self.html_item = '<li><a href="{url}">{name}</a></li>\n'
# Templates graphviz tags
self.dot_start_graph = "digraph CI {\n" +\
" node [fontcolor=red, fontsize=8];\n"
self.dot_end_graph = "}"
self.dot_official_item = ' "{name_official}"[URL="{url}", style=filled, fillcolor="0 0 0"];\n'
self.dot_unofficial_item = ' "{name_unofficial}"[URL="{url}", style=filled, fillcolor="0 0 0"];\n'
self.dot_without_url_item = ' "{name_without_url}"[style=filled, fillcolor="0 0 0"];\n'
self.dot_item_child = ' "{name_official}"->"{child}";\n'
if notifier is not None:
assert isinstance(notifier, Notifier)
self._only_official = False
self._notifier = notifier
if list_of_ci is None:
self._list_of_ci = []
else:
self._list_of_ci = list_of_ci
def notify(self, text):
"""
notify something to the user (use the Notifier object)
"""
if self._notifier is not None:
self._notifier.notify(text)
def __iter__(self):
for centre_of_interest in self._list_of_ci:
yield centre_of_interest
def __len__(self):
return len(self._list_of_ci)
@property
def list_of_ci(self):
""" get the list of ci managed """
return self._list_of_ci
def append(self, centre_of_interest):
""" add a new centre of interest to be managed """
assert isinstance(centre_of_interest, CentreOfInterest)
self._list_of_ci.append(centre_of_interest)
def __str__(self):
tmp = ""
for centre_of_interest in self._list_of_ci:
tmp += str(centre_of_interest)
return tmp
def find(self, ci_name):
""" find a centre of interest by name """
assert isinstance(ci_name, str)
for centre_of_interest in self:
if centre_of_interest.name == ci_name:
return centre_of_interest
return None
def verify_xml(self, xml_file_path, dtd_file_path):
with open(dtd_file_path, 'r', encoding='utf-8') as dtd_file:
with open(xml_file_path, 'r', encoding='utf-8') as xml_file:
dtd = etree.DTD(dtd_file)
root = etree.parse(xml_file)
if not dtd.validate(root):
raise IOError('Not valide according to "' + dtd_file_path +
'"\n' +
str(dtd.error_log.filter_from_errors()[0]))
def delete_unwanted_ci(self):
if self._only_official:
self._list_of_ci = [ci for ci in self._list_of_ci if ci.official]
for ci in self._list_of_ci:
ci.children = [child for child in ci.children if child.official]
def load_xml(self, xml_file, only_official=False, with_link=True):
""" load all the centres of interest from a xml file """
self.notify('load xml_file "' + xml_file + '"')
self.verify_xml(xml_file, self.ci_dtd)
self._list_of_ci = []
self._only_official = only_official
doc = minidom.parse(xml_file)
for ci_node in doc.documentElement.getElementsByTagName("CI"):
name = self._get_element(ci_node, "name")
if with_link:
                # url is None if the <url> tag is empty
url = self._get_element(ci_node, "url")
else:
url = ''
date = self._get_element(ci_node, "date")
official = self._get_element(ci_node, "official")
centre_of_interest = CentreOfInterest(name, url, date)
centre_of_interest.official = official
self.append(centre_of_interest)
def load_children(self, ci_graph_file):
"""
Make the link between the centres of interest and their children
"""
self.verify_xml(ci_graph_file, self.ci_graph_dtd)
doc = minidom.parse(ci_graph_file)
for ci_node in doc.documentElement.getElementsByTagName("CI"):
ci_name = ci_node.getElementsByTagName("name")[0].firstChild.nodeValue
centre_of_interest = self.find(ci_name)
if centre_of_interest is None:
raise ValueError('"' + ci_name + '" found in "' +
ci_graph_file + '" doesn\'t exist in ci.xml')
children_node = ci_node.getElementsByTagName("children")[0]
child_nodes = children_node.getElementsByTagName("child")
for child in child_nodes:
if child.firstChild is None:
                    raise ValueError("empty <child> tag in '" + ci_name + "'")
else:
child_name = child.firstChild.nodeValue
child_ci = self.find(child_name)
if child_ci is not None:
centre_of_interest.add_child(child_ci)
else:
raise ValueError("try to add the child : '" +
child_name +
"' to '" +
ci_name +
"' but the child was not found")
@classmethod
def _get_element(cls, ci_node, element):
"""
Get the element 'element', of the centre of interest node 'ci_node'
"""
node = ci_node.getElementsByTagName(element)[0]
if node.firstChild is None:
return None
else:
return node.firstChild.nodeValue
def sorted_by_name(self, translate=None):
"""
Return the list of CI sorted by name.
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type translate: function
"""
if translate is not None:
return sorted(self._list_of_ci, key=lambda ci: translate(ci.name))
else:
return sorted(self._list_of_ci, key=lambda ci: ci.name)
def sorted_by_date(self, translate=None):
"""
Return the list of CI sorted by date.
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type translate: function
"""
if translate is None:
translate = identity
def get_date_name(centre_of_interest):
""" return a couple (ci_date, ci_name), to sort the list """
if centre_of_interest.date is not None:
return (centre_of_interest.date,
translate(centre_of_interest.name))
else:
return ("", translate(centre_of_interest.name))
return sorted(self._list_of_ci, key=get_date_name)
def load_template_dot(self, dot_file_path):
self.notify('load dot template file "' + dot_file_path + '"')
def get_match(match, message):
if not match:
raise IOError(message)
else:
return match.group(1)
with open(dot_file_path, 'r', encoding='utf-8') as dot_file:
template = dot_file.read()
start_graph = re.search(r'^(.*)// official ci start',
template,
re.DOTALL)
self.dot_start_graph = get_match(start_graph,
"Incorrect dot template, can’t find start")
end_graph = re.search(r'// child end(.*)$', template, re.DOTALL)
self.dot_end_graph = get_match(end_graph, "Incorrect dot template, can’t find end")
official_item = re.search(r'// official ci start(.*)// official ci end',
template,
re.DOTALL)
self.dot_official_item = get_match(official_item,
"Incorrect dot template, can’t find official ci item")
unofficial_item = re.search(r'// unofficial ci start(.*)// unofficial ci end',
template,
re.DOTALL)
self.dot_unofficial_item = get_match(unofficial_item,
"Incorrect dot template, can’t find unofficial ci item")
without_url_item = re.search(r'// without_url start(.*)// without_url end',
template,
re.DOTALL)
self.dot_without_url_item = get_match(without_url_item,
"Incorrect dot template, can’t find without url ci item")
item_child = re.search(r'// child start(.*)// child end',
template,
re.DOTALL)
self.dot_item_child = get_match(item_child,
"Incorrect dot template, can’t find child ci item")
def load_template_html(self, html_file_path):
self.notify('load html template file "' + html_file_path + '"')
with open(html_file_path, 'r', encoding='utf-8') as html_file:
template = html_file.read()
start_list = re.search(r'^(.*)<!-- date -->', template, re.DOTALL)
if not start_list:
raise IOError("Incorrect html template, can’t find start")
else:
self.html_start_list = start_list.group(1)
end_list = re.search(r'<!-- /item -->(.*)$', template, re.DOTALL)
if not end_list:
raise IOError("Incorrect html template, can’t find end")
else:
self.html_end_list = end_list.group(1)
date = re.search(r'<!-- date -->(.*)<!-- /date -->',
template,
re.DOTALL)
if not date:
raise IOError("Incorrect html template, can’t find date")
else:
self.html_date = date.group(1)
item = re.search(r'<!-- item -->(.*)<!-- /item -->',
template,
re.DOTALL)
if not item:
raise IOError("Incorrect html template, can’t find item")
else:
self.html_item = item.group(1)
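    # A minimal HTML template satisfying the markers parsed above could look
    # like the following sketch (illustrative only, not shipped with the code):
    #     <ul>
    #     <!-- date --><h2>{date}</h2><!-- /date -->
    #     <!-- item --><li><a href="{url}">{name}</a></li><!-- /item -->
    #     </ul>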
def to_html_list(self, order="by_name", translate=None):
"""
Export the sorted list of CI to html.
:param order: choose "by_name" to sort by name and "by_date" to sort by date
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type order: str
:type translate: function
:return: return a string corresponding of the html page
"""
self.delete_unwanted_ci()
if translate is None:
translate = identity
string = self.html_start_list
if order == "by_name":
sorted_list_of_ci = self.sorted_by_name(translate)
elif order == "by_date":
sorted_list_of_ci = self.sorted_by_date(translate)
else:
raise ValueError("order should be 'by_name', or 'by_date'. '" +
order +
"' given.")
if (order == "by_date")and(len(sorted_list_of_ci) > 0):
date = sorted_list_of_ci[0].date
if date is not None:
str_date = date
else:
str_date = "unknown"
string += self.html_date.replace('{date}', str_date)
for centre_of_interest in sorted_list_of_ci:
if (order == "by_date")and(centre_of_interest.date != date):
date = centre_of_interest.date
if date is not None:
str_date = date
else:
str_date = "unknown"
string += self.html_date.replace('{date}', str_date)
if centre_of_interest.url is not None:
item = self.html_item.replace('{url}', centre_of_interest.url)
item = item.replace('{name}',
translate(centre_of_interest.name))
string += item
string += self.html_end_list
return string
def to_graphviz(self, ci_graph_file, translate=None):
"""
Export the sorted list of CI to a graphviz dot format.
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type translate: function
:return: return a string corresponding of the dot file
"""
self.load_children(ci_graph_file)
self.delete_unwanted_ci()
if translate is None:
translate = identity
string = self.dot_start_graph
for centre_of_interest in self:
if centre_of_interest.url is None or centre_of_interest.url == '':
dot_template = self.dot_without_url_item
else:
if centre_of_interest.official:
dot_template = self.dot_official_item
else:
dot_template = self.dot_unofficial_item
item_name = translate(centre_of_interest.name)
item = re.sub(r'{name.*?}', item_name, dot_template)
if centre_of_interest.url is not None:
item = re.sub(r'{url}', centre_of_interest.url, item)
string += item
for child in centre_of_interest.children:
item_child = re.sub(r'{name.*?}', item_name,
self.dot_item_child)
item_child = re.sub(r'{child}', translate(child.name),
item_child)
string += item_child
string += self.dot_end_graph
return replace_special_char(string)
| gpl-2.0 | -8,393,297,476,626,185,000 | 37.921466 | 107 | 0.52159 | false | 4.135744 | false | false | false |
sanja7s/SR_Twitter | src_general/explain_FORMATION_DELETION_REL.py | 1 | 6415 | #!/usr/bin/env python
# a bar plot with errorbars
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
from pylab import *
width = 0.28 # the width of the bars
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
matplotlib.rc('font', **font)
# plot with various axes scales
plt.figure(1)
fig = gcf()
def plot_bars_FORMATION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 0))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='darkred', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='lightcoral', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Formed and persisting', \
'Formed and non-persisting', 'Persisting average'),\
frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At formation', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
N = 3
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
formationDeletionMeans = (1.12747979427, 1.56808719079, 1.62160176341)
formationDeletionStd = (1.35650452374, 1.71205560699, 1.83913259462)
# PERSISTING LINKS
# STRONG contacts REL
formationNodeletionMeans = (0.964889222681, 1.44874202028, 1.68794592565)
formationNodeletionStd = (1.30256068643, 1.64860382968, 1.94388833634)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_FORMATION_STRONG_REL(formationNodeletionMeans, formationNodeletionStd,\
formationDeletionMeans, formationDeletionStd, SRMeansS, SRStdS)
def plot_bars_DELETION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 1))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='c', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='cyan', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Persisting decommissioned', \
'Non-persisting decommissioned', 'Persisting average'),\
loc='best',frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At decommission', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
#deletionFormationMeans = (1.35860783095, 1.40335612181, 1.38222498446)
#deletionFormationStd = (1.39698763227, 1.515042018, 1.6001731639)
deletionFormationMeans = (1.21614009307, 1.58645603723, 1.613397012)
deletionFormationStd = (1.39228801763, 1.73298601092, 1.84822380219)
# PERSISTING LINKS
#deletionNoformationMeans = (1.16101995042, 1.52591193484, 1.54066816196)
#deletionNoformationStd = (1.36105887603, 1.69996084625, 1.80123581372)
deletionNoformationMeans = (1.09195402299, 1.16457680251, 1.09717868339)
deletionNoformationStd = (1.25857893939, 1.33146910699, 1.31900439894)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_DELETION_STRONG_REL(deletionNoformationMeans, deletionNoformationStd,\
deletionFormationMeans, deletionFormationStd, SRMeansS, SRStdS)
##########################################################################
plt.tight_layout()
fig = plt.gcf()
fig.set_size_inches(12.4,4.5)
plt.tight_layout()
#plt.figtext(0.20, 0.49, 'Relative status of the pair: weak contacts')
#plt.figtext(0.27, 0.973, 'Relative status of the pair: strong contacts')
fig.suptitle('Relative status (strong contacts)', verticalalignment='center', horizontalalignment='center', size = 16)
#fig.suptitle('Sum including weak contacts', verticalalignment='center', y=0.5, horizontalalignment='center', size = 16)
plt.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/explain_FORMATION_DELETION_REL.eps", dpi=710)
| mit | -5,203,250,858,654,813,000 | 33.304813 | 120 | 0.677631 | false | 2.70219 | false | false | false |
LittleRichard/luxalert | luxweb/luxweb/spiders/KensingtonSpider.py | 1 | 4229 | import datetime
import re
import scrapy
from nest.storage.luxalert.entity.Apartment import Apartment
from nest.storage.luxalert.entity.ApartmentSnapshot import ApartmentSnapshot
from luxweb.luxweb import HMTL_SPIDER_DATA_TUPLE_KEY
from luxweb.luxweb.ScrapeErrorHandler import ScrapeErrorHandler
from luxweb.luxweb.spiders.AbstractHTMLSpider import AbstractHTMLSpider
class KensingtonSpider(AbstractHTMLSpider):
THE_KENSINGTON_NAME = 'The Kensington'
BUILDING_NAMES = (
THE_KENSINGTON_NAME,
)
# name of the spider, a scrapy-required thing
name = "kensington"
@classmethod
def get_building_names(cls):
return KensingtonSpider.BUILDING_NAMES
def start_requests(self):
# urls to scrape
urls = [
'http://www.kensingtonboston.com/floor-plans/apartments'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
@ScrapeErrorHandler.wrap_to_raise
def parse(self, response):
buildings_by_name = self.get_buildings_by_name(KensingtonSpider.BUILDING_NAMES)
# loops through all <div< with the class = "plan_detail"
for plan in response.xpath('//div[@class="plan_detail"]'):
# unit: extract first element in <h3> list
unit = str(plan.xpath('div[@class="plan_info"]/h3/text()').extract_first())
# floor: 2nd character if first character is 0, otherwise first 2 characters
if unit[0] == "0":
floor = str(unit[1])
else:
floor = str(unit[:2])
            # bedrooms: studios count as 0 bedrooms, otherwise take the first character of the text
if plan.xpath('div[@class="plan_info"]/ul/li[1]/text()').extract_first() == "STUDIO":
bedrooms = int(0)
else:
bedrooms = int(plan.xpath('div[@class="plan_info"]/ul/li[1]/text()').extract_first()[0])
# bathrooms: first character from string
bathrooms_str = plan.xpath('div[@class="plan_info"]/ul/li[2]/text()').extract_first()
bathrooms_str = re.sub(u' BATH.+', u'', bathrooms_str)
bathrooms = float(bathrooms_str)
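            # For example (illustrative values): a scraped string such as
            # "1.5 BATHS" is reduced to "1.5" by the substitution above before
            # the float conversion.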
            # sq_ft: remove "SQ. FEET" and ","
sq_ft = plan.xpath('div[@class="plan_info"]/ul/li[3]/text()').extract_first()
sq_ft = sq_ft.replace("SQ. FEET", "")
sq_ft = sq_ft.replace(",", "")
sq_ft = int(sq_ft)
# price: remove "FROM $" and "/MONTH" and ","
price = plan.xpath('div[@class="plan_info"]/ul/li[4]/text()').extract_first()
price = price.replace("FROM $", "")
price = price.replace("/MONTH", "")
price = price.replace(",", "")
price = float(price)
# availability: from 10th character onwards, change "NOW" to today's date
if plan.xpath('div[@class="plan_info"]/ul/li[5]/text()').extract_first()[10:] == "NOW":
availability = datetime.datetime.utcnow().date()
else:
availability_str = str(plan.xpath('div[@class="plan_info"]/ul/li[5]/text()').extract_first()[10:])
availability = datetime.datetime.strptime(availability_str, '%m/%d/%Y').date()
# floor_plan
floor_plan = str(plan.xpath('div[@class="plan_image desktop_and_tab"]/img/@src').extract_first())
building = buildings_by_name[KensingtonSpider.THE_KENSINGTON_NAME]
apartment = Apartment(
building,
floor,
sq_ft,
bathrooms,
bedrooms,
unit,
)
apartment_snap = ApartmentSnapshot(
apartment,
datetime.datetime.utcnow(),
price,
availability,
floor_plan
)
yield {HMTL_SPIDER_DATA_TUPLE_KEY: (apartment, apartment_snap)}
next_page = response.xpath('//a[@rel="next"]/@href').extract_first()
if (next_page is not None) and (next_page != "javascript:void(0);"):
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
| gpl-3.0 | 332,383,384,053,203,200 | 36.424779 | 114 | 0.572476 | false | 3.719437 | false | false | false |
clovertrail/cloudinit-bis | cloudinit/config/cc_scripts_per_boot.py | 1 | 1760 | # vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Scripts Per Boot
----------------
**Summary:** run per boot scripts
Any scripts in the ``scripts/per-boot`` directory on the datasource will be run
every time the system boots. Scripts will be run in alphabetical order. This
module does not accept any config keys.
**Internal name:** ``cc_scripts_per_boot``
**Module frequency:** per always
**Supported distros:** all
"""
import os
from cloudinit import util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
SCRIPT_SUBDIR = 'per-boot'
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
util.runparts(runparts_path)
except Exception:
log.warn("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
raise
| gpl-3.0 | 9,048,880,670,074,391,000 | 29.877193 | 79 | 0.696591 | false | 3.527054 | false | false | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/scrapers/onlinedizi_scraper.py | 1 | 4676 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://onlinedizi.co'
class OnlineDizi_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'OnlineDizi'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=.25)
fragment = dom_parser.parse_dom(html, 'ul', {'class': 'dropdown-menu'})
if fragment:
match = re.search('''href=['"]([^'"]+)[^>]*>(?:Altyaz.{1,3}s.{1,3}z)<''', fragment[0])
if match:
option_url = urlparse.urljoin(self.base_url, match.group(1))
html = self._http_get(option_url, cache_limit=2)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'video-player'})
if fragment:
iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], cache_limit=.25)
iframe_url = dom_parser.parse_dom(html, 'iframe', {'id': 'ifr'}, ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], allow_redirect=False, method='HEAD', cache_limit=.25)
if html.startswith('http'):
stream_url = html
host = urlparse.urlparse(stream_url).hostname
stream_url += '|User-Agent=%s' % (scraper_utils.get_ua())
quality = QUALITIES.HIGH
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._default_get_url(video)
def _get_episode_url(self, show_url, video):
episode_pattern = '''href=['"]([^'"]+-%s-sezon-%s-bolum[^'"]*)''' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
def search(self, video_type, title, year, season=''):
html = self._http_get(self.base_url, cache_limit=48)
results = []
seen_urls = {}
norm_title = scraper_utils.normalize_title(title)
for fragment in dom_parser.parse_dom(html, 'ul', {'class': '[^"]*all-series-list[^"]*'}):
for match in re.finditer('''href=["']([^'"]+)[^>]+>([^<]+)''', fragment):
url, match_title = match.groups()
if url not in seen_urls:
seen_urls[url] = True
if norm_title in scraper_utils.normalize_title(match_title):
result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
results.append(result)
return results
| gpl-2.0 | -8,868,857,414,232,558,000 | 43.533333 | 182 | 0.566938 | false | 3.858086 | false | false | false |
hailongqiu/new-deepin-media-player | src/plugins/youku/youku_web_parse.py | 1 | 5006 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 XXX, Inc.
# 2013 红铭曼,王芳
#
# Author: 红铭曼,王芳 <[email protected]>
# Maintainer: 红铭曼,王芳 <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from BeautifulSoup import BeautifulSoup
import urllib2
import re
class YoukuWebParse(object):
def __init__(self):
self.headers = {"Accept":"*/*", "Accept-Language":"zh-CN", "":"",
"User-Agent":"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)",
#"Accept-Encoding":"gzip, deflate",
"Connection":"Keep-Alive"}
def scan_movie_leave(self, addr):
temp_info = None
url = addr
req = urllib2.Request(url=url, headers=self.headers)
        data = urllib2.urlopen(req).read()
#
sounp = BeautifulSoup(data)
music_list = sounp.findAll("a", {"class":"btnShow btnplayposi"})
for link in music_list:
            addr = link.get("href") # Get the address.
            title = link.get("title") # Get the title.
temp_info = (addr, title)
return temp_info
def scan_3_leave(self, addr):
url = addr
req = urllib2.Request(url=url, headers=self.headers)
        data = urllib2.urlopen(req).read()
#
sounp = BeautifulSoup(data)
p_title_list = sounp.findAll("a",
{"href": re.compile("http://"),
"title" : re.compile("\d"),
"charset" : re.compile("-"),
"target" : re.compile('_')
})
temp_list = []
#print p_title_list
for list_ in p_title_list:
addr_ = list_.get("href")
name_ = list_.get("title")
#print name_, addr_
temp_list.append((addr_, name_))
return temp_list
def parse_web(self, addr, index=1):
page_num = None
all_sum = None
info_list = []
url = addr + "%d.html" % (index)
#print url
#data = urllib2.urlopen(url).read()
req = urllib2.Request(url=url, headers=self.headers)
        data = urllib2.urlopen(req).read()
#
sounp = BeautifulSoup(data)
p_title_list = sounp.findAll('li', {"class" : "p_title"})
for link in p_title_list:
            a_link = link.a # the 'a' element in <a href="..." title="...">
            addr = a_link.get("href") # Get the address.
            title = a_link.get("title") # Get the title.
#print "addr:", addr, "title:", title
info_list.append((addr, title))
if index == 1:
page_num = len(p_title_list)
#print "link len:", page_num
all_sum_str = sounp.findAll("div", {"class" : "stat"})
all_sum_utf_8 = str(all_sum_str[0].string).replace("条", "")
all_sum = int(str(all_sum_utf_8.split("/")[1].strip()))
#print "总数:", all_sum
return info_list, page_num, all_sum
def get_sum_page(all_sum, page_num):
page_sum = all_sum / page_num
page_mod = all_sum % page_num
if page_mod > 0:
page_sum += 1
return page_sum
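# Illustrative example (not in the original source): 45 items listed 20 per
# page need get_sum_page(45, 20) == 3 pages, while an exact multiple such as
# get_sum_page(40, 20) stays at 2.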
if __name__ == "__main__":
from youku_web import v_olist_dict
v_olist_keys = v_olist_dict.keys()
youku_web_parse = YoukuWebParse()
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_zcc001eb6962411de83b1.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_zcc000b60962411de83b1.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_z84933d227a4911e1b2ac.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_z8820e97ecfeb11e19013.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_z0bb2a948c24311df97c0.html")
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["热血"])
'''
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["格斗"])
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["恋爱"])
print get_sum_page(all_sum, page_num)
print get_sum_page(all_sum, page_num)
'''
for i in range(1, get_sum_page(all_sum, page_num + 1)):
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["热血"], i)
for info in info_list:
print info[0], info[1]
| gpl-3.0 | 5,523,252,859,593,766,000 | 36.257576 | 94 | 0.573607 | false | 3.08532 | false | false | false |
srznew/heat | heat/engine/resources/openstack/nova/server.py | 1 | 65002 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import function
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.neutron import subnet
from heat.engine.resources import stack_user
from heat.engine import support
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config')
cfg.CONF.import_opt('stack_scheduler_hints', 'heat.common.config')
LOG = logging.getLogger(__name__)
class Server(stack_user.StackUser):
PROPERTIES = (
NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT
) = (
'name', 'image', 'block_device_mapping', 'block_device_mapping_v2',
'flavor', 'flavor_update_policy', 'image_update_policy', 'key_name',
'admin_user', 'availability_zone', 'security_groups', 'networks',
'scheduler_hints', 'metadata', 'user_data_format', 'user_data',
'reservation_id', 'config_drive', 'diskConfig', 'personality',
'admin_pass', 'software_config_transport'
)
_BLOCK_DEVICE_MAPPING_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name', 'volume_id',
'snapshot_id',
'volume_size',
'delete_on_termination',
)
_BLOCK_DEVICE_MAPPING_V2_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME,
BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_IMAGE_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_SWAP_SIZE,
BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
BLOCK_DEVICE_MAPPING_DISK_BUS,
BLOCK_DEVICE_MAPPING_BOOT_INDEX,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name',
'volume_id',
'image_id',
'snapshot_id',
'swap_size',
'device_type',
'disk_bus',
'boot_index',
'volume_size',
'delete_on_termination',
)
_NETWORK_KEYS = (
NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
) = (
'uuid', 'network', 'fixed_ip', 'port',
)
_SOFTWARE_CONFIG_FORMATS = (
HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
) = (
'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG'
)
_SOFTWARE_CONFIG_TRANSPORTS = (
POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE
) = (
'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE'
)
ATTRIBUTES = (
NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS,
INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS,
) = (
'name', 'addresses', 'networks', 'first_address',
'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
update_allowed=True
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image to boot with.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
update_allowed=True
),
BLOCK_DEVICE_MAPPING: properties.Schema(
properties.Schema.LIST,
_('Block device mappings for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
required=True
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to boot from. Only one '
'of volume_id or snapshot_id should be '
'provided.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume, in GB. It is safe to '
'leave this blank and have the Compute service '
'infer the size.')
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
)
),
BLOCK_DEVICE_MAPPING_V2: properties.Schema(
properties.Schema.LIST,
_('Block device mappings v2 for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The volume_id can be boot or non-boot device '
'to the server.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create a volume from.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the swap, in MB.')
),
BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Device type: at the moment we can make distinction'
' only between disk and cdrom.'),
constraints=[
constraints.AllowedValues(['cdrom', 'disk']),
],
),
BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema(
properties.Schema.STRING,
_('Bus of the device: hypervisor driver chooses a '
'suitable default if omitted.'),
constraints=[
constraints.AllowedValues(['ide', 'lame_bus',
'scsi', 'usb',
'virtio']),
],
),
BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema(
properties.Schema.INTEGER,
_('Integer used for ordering the boot disks.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Size of the block device in GB. If it is omitted, '
'hypervisor driver calculates size.'),
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
),
support_status=support.SupportStatus(version='2015.1')
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the flavor to boot onto.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
FLAVOR_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a flavor update; either by requesting '
'a server resize or by replacing the entire server.'),
default='RESIZE',
constraints=[
constraints.AllowedValues(['RESIZE', 'REPLACE']),
],
update_allowed=True
),
IMAGE_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply an image-id update; either by '
              'requesting a server rebuild or by replacing the entire server.'),
default='REBUILD',
constraints=[
constraints.AllowedValues(['REBUILD', 'REPLACE',
'REBUILD_PRESERVE_EPHEMERAL']),
],
update_allowed=True
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keypair to inject into the server.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
]
),
ADMIN_USER: properties.Schema(
properties.Schema.STRING,
_('Name of the administrative user to use on the server.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('The default cloud-init user set up for each image '
'(e.g. "ubuntu" for Ubuntu 12.04+, "fedora" for '
'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name of the availability zone for server placement.')
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('List of security group names or IDs. Cannot be used if '
'neutron ports are associated with this server; assign '
'security groups to the ports instead.'),
default=[]
),
NETWORKS: properties.Schema(
properties.Schema.LIST,
_('An ordered list of nics to be added to this server, with '
'information about connected networks, fixed ips, port etc.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NETWORK_UUID: properties.Schema(
properties.Schema.STRING,
_('ID of network to create a port on.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % NETWORK_ID,
version='2014.1'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('Name or ID of network to create a port on.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IP address to specify for the port '
'created on the requested network.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
NETWORK_PORT: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port to associate with this '
'server.'),
constraints=[
constraints.CustomConstraint('neutron.port')
]
),
},
),
update_allowed=True
),
SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
'boot a server.')
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to store for this server. Both '
'keys and values must be 255 characters or less. Non-string '
'values will be serialized to JSON (and the serialized '
'string must be 255 characters or less).'),
update_allowed=True
),
USER_DATA_FORMAT: properties.Schema(
properties.Schema.STRING,
_('How the user_data should be formatted for the server. For '
'HEAT_CFNTOOLS, the user_data is bundled as part of the '
'heat-cfntools cloud-init boot configuration data. For RAW '
'the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.'),
default=HEAT_CFNTOOLS,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
]
),
SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should receive the metadata required for '
'software configuration. POLL_SERVER_CFN will allow calls to '
'the cfn API action DescribeStackResource authenticated with '
'the provided keypair. POLL_SERVER_HEAT will allow calls to '
'the Heat API resource-show using the provided keystone '
'credentials. POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling.'),
default=cfg.CONF.default_software_config_transport,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
]
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
_('User data script to be executed by cloud-init.'),
default=''
),
RESERVATION_ID: properties.Schema(
properties.Schema.STRING,
_('A UUID for the set of servers being requested.')
),
CONFIG_DRIVE: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, enable config drive on the server.')
),
DISK_CONFIG: properties.Schema(
properties.Schema.STRING,
_('Control how the disk is partitioned when the server is '
'created.'),
constraints=[
constraints.AllowedValues(['AUTO', 'MANUAL']),
]
),
PERSONALITY: properties.Schema(
properties.Schema.MAP,
_('A map of files to create/overwrite on the server upon boot. '
'Keys are file names and values are the file contents.'),
default={}
),
ADMIN_PASS: properties.Schema(
properties.Schema.STRING,
_('The administrator password for the server.'),
update_allowed=True
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the server.'),
type=attributes.Schema.STRING
),
ADDRESSES: attributes.Schema(
_('A dict of all network addresses with corresponding port_id. '
'Each network will have two keys in dict, they are network '
'name and network id. '
'The port ID may be obtained through the following expression: '
'"{get_attr: [<server>, addresses, <network name_or_id>, 0, '
'port]}".'),
type=attributes.Schema.MAP
),
NETWORKS_ATTR: attributes.Schema(
_('A dict of assigned network addresses of the form: '
'{"public": [ip1, ip2...], "private": [ip3, ip4], '
'"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. '
'Each network will have two keys in dict, they are network '
'name and network id. '),
type=attributes.Schema.MAP
),
FIRST_ADDRESS: attributes.Schema(
_('Convenience attribute to fetch the first assigned network '
'address, or an empty string if nothing has been assigned at '
'this time. Result may not be predictable if the server has '
'addresses from more than one network.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use the networks attribute instead of '
'first_address. For example: "{get_attr: '
'[<server name>, networks, <network name>, 0]}"'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
INSTANCE_NAME: attributes.Schema(
_('AWS compatible instance name.'),
type=attributes.Schema.STRING
),
ACCESSIPV4: attributes.Schema(
_('The manually assigned alternative public IPv4 address '
'of the server.'),
type=attributes.Schema.STRING
),
ACCESSIPV6: attributes.Schema(
_('The manually assigned alternative public IPv6 address '
'of the server.'),
type=attributes.Schema.STRING
),
CONSOLE_URLS: attributes.Schema(
_("URLs of server's consoles. "
"To get a specific console type, the requested type "
"can be specified as parameter to the get_attr function, "
"e.g. get_attr: [ <server>, console_urls, novnc ]. "
"Currently supported types are "
"novnc, xvpvnc, spice-html5, rdp-html5, serial."),
support_status=support.SupportStatus(version='2015.1'),
type=attributes.Schema.MAP
),
}
    # Server host name is limited to 53 characters due to the typical default
    # Linux HOST_NAME_MAX of 64, minus the .novalocal suffix appended to the name
physical_resource_name_limit = 53
default_client_name = 'nova'
entity = 'servers'
def translation_rules(self):
return [properties.TranslationRule(
self.properties,
properties.TranslationRule.REPLACE,
source_path=[self.NETWORKS, self.NETWORK_ID],
value_name=self.NETWORK_UUID)]
def __init__(self, name, json_snippet, stack):
super(Server, self).__init__(name, json_snippet, stack)
if self.user_data_software_config():
self._register_access_key()
def _server_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.physical_resource_name()
def _config_drive(self):
# This method is overridden by the derived CloudServer resource
return self.properties[self.CONFIG_DRIVE]
def _populate_deployments_metadata(self, meta):
meta['deployments'] = meta.get('deployments', [])
if self.transport_poll_server_heat():
meta['os-collect-config'] = {'heat': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'stack_id': self.stack.identifier().stack_path(),
'resource_name': self.name}
}
if self.transport_zaqar_message():
queue_id = self.physical_resource_name()
self.data_set('metadata_queue_id', queue_id)
zaqar_plugin = self.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(
self.stack.stack_user_project_id)
queue = zaqar.queue(queue_id)
queue.post({'body': meta, 'ttl': zaqar_plugin.DEFAULT_TTL})
meta['os-collect-config'] = {'zaqar': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'queue_id': queue_id}
}
elif self.transport_poll_server_cfn():
meta['os-collect-config'] = {'cfn': {
'metadata_url': '%s/v1/' % cfg.CONF.heat_metadata_server_url,
'access_key_id': self.access_key,
'secret_access_key': self.secret_key,
'stack_name': self.stack.name,
'path': '%s.Metadata' % self.name}
}
elif self.transport_poll_temp_url():
container = self.physical_resource_name()
object_name = str(uuid.uuid4())
self.client('swift').put_container(container)
url = self.client_plugin('swift').get_temp_url(
container, object_name, method='GET')
put_url = self.client_plugin('swift').get_temp_url(
container, object_name)
self.data_set('metadata_put_url', put_url)
self.data_set('metadata_object_name', object_name)
meta['os-collect-config'] = {'request': {
'metadata_url': url}
}
self.client('swift').put_object(
container, object_name, jsonutils.dumps(meta))
self.metadata_set(meta)
def _register_access_key(self):
'''
Access is limited to this resource, which created the keypair
'''
def access_allowed(resource_name):
return resource_name == self.name
if self.transport_poll_server_cfn():
self.stack.register_access_allowed_handler(
self.access_key, access_allowed)
elif self.transport_poll_server_heat():
self.stack.register_access_allowed_handler(
self._get_user_id(), access_allowed)
def _create_transport_credentials(self):
if self.transport_poll_server_cfn():
self._create_user()
self._create_keypair()
elif (self.transport_poll_server_heat() or
self.transport_zaqar_message()):
self.password = uuid.uuid4().hex
self._create_user()
self._register_access_key()
@property
def access_key(self):
return self.data().get('access_key')
@property
def secret_key(self):
return self.data().get('secret_key')
@property
def password(self):
return self.data().get('password')
@password.setter
def password(self, password):
if password is None:
self.data_delete('password')
else:
self.data_set('password', password, True)
def user_data_raw(self):
return self.properties[self.USER_DATA_FORMAT] == self.RAW
def user_data_software_config(self):
return self.properties[
self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG
def transport_poll_server_cfn(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_CFN
def transport_poll_server_heat(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_HEAT
def transport_poll_temp_url(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_TEMP_URL
def transport_zaqar_message(self):
return self.properties.get(
self.SOFTWARE_CONFIG_TRANSPORT) == self.ZAQAR_MESSAGE
def get_software_config(self, ud_content):
try:
sc = self.rpc_client().show_software_config(
self.context, ud_content)
return sc[rpc_api.SOFTWARE_CONFIG_CONFIG]
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
return ud_content
def handle_create(self):
security_groups = self.properties[self.SECURITY_GROUPS]
user_data_format = self.properties[self.USER_DATA_FORMAT]
ud_content = self.properties[self.USER_DATA]
if self.user_data_software_config() or self.user_data_raw():
if uuidutils.is_uuid_like(ud_content):
# attempt to load the userdata from software config
ud_content = self.get_software_config(ud_content)
metadata = self.metadata_get(True) or {}
if self.user_data_software_config():
self._create_transport_credentials()
self._populate_deployments_metadata(metadata)
userdata = self.client_plugin().build_userdata(
metadata,
ud_content,
instance_user=None,
user_data_format=user_data_format)
flavor = self.properties[self.FLAVOR]
availability_zone = self.properties[self.AVAILABILITY_ZONE]
image = self.properties[self.IMAGE]
if image:
image = self.client_plugin('glance').get_image_id(image)
flavor_id = self.client_plugin().get_flavor_id(flavor)
instance_meta = self.properties[self.METADATA]
if instance_meta is not None:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
scheduler_hints = self.properties[self.SCHEDULER_HINTS]
if cfg.CONF.stack_scheduler_hints:
if scheduler_hints is None:
scheduler_hints = {}
scheduler_hints['heat_root_stack_id'] = self.stack.root_stack_id()
scheduler_hints['heat_stack_id'] = self.stack.id
scheduler_hints['heat_stack_name'] = self.stack.name
scheduler_hints['heat_path_in_stack'] = self.stack.path_in_stack()
scheduler_hints['heat_resource_name'] = self.name
nics = self._build_nics(self.properties[self.NETWORKS])
block_device_mapping = self._build_block_device_mapping(
self.properties[self.BLOCK_DEVICE_MAPPING])
block_device_mapping_v2 = self._build_block_device_mapping_v2(
self.properties[self.BLOCK_DEVICE_MAPPING_V2])
reservation_id = self.properties[self.RESERVATION_ID]
disk_config = self.properties[self.DISK_CONFIG]
admin_pass = self.properties[self.ADMIN_PASS] or None
personality_files = self.properties[self.PERSONALITY]
key_name = self.properties[self.KEY_NAME]
server = None
try:
server = self.client().servers.create(
name=self._server_name(),
image=image,
flavor=flavor_id,
key_name=key_name,
security_groups=security_groups,
userdata=userdata,
meta=instance_meta,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone,
block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
reservation_id=reservation_id,
config_drive=self._config_drive(),
disk_config=disk_config,
files=personality_files,
admin_pass=admin_pass)
finally:
# Avoid a race condition where the thread could be canceled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server.id
def check_create_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_check(self):
server = self.client().servers.get(self.resource_id)
status = self.client_plugin().get_status(server)
checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}]
self._verify_check_conditions(checks)
@classmethod
def _build_block_device_mapping(cls, bdm):
if not bdm:
return None
bdm_dict = {}
for mapping in bdm:
mapping_parts = []
snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if snapshot_id:
mapping_parts.append(snapshot_id)
mapping_parts.append('snap')
else:
volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID)
mapping_parts.append(volume_id)
mapping_parts.append('')
volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
if volume_size:
mapping_parts.append(str(volume_size))
else:
mapping_parts.append('')
if delete:
mapping_parts.append(str(delete))
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
bdm_dict[device_name] = ':'.join(mapping_parts)
return bdm_dict
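    # Illustrative note (assumed example, not from the original code): a
    # mapping with device_name 'vda', a snapshot id and a 10 GB volume that is
    # deleted on termination serializes to the legacy colon-joined form, e.g.
    # {'vda': '<snapshot-id>:snap:10:True'}.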
@classmethod
def _build_block_device_mapping_v2(cls, bdm_v2):
if not bdm_v2:
return None
bdm_v2_list = []
for mapping in bdm_v2:
bmd_dict = None
if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID),
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID),
'source_type': 'snapshot',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID),
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True,
'guest_format': 'swap',
'volume_size': mapping.get(
cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE),
}
update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME,
cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
cls.BLOCK_DEVICE_MAPPING_DISK_BUS,
cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX,
cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
for update_prop in update_props:
if mapping.get(update_prop) is not None:
bmd_dict[update_prop] = mapping.get(update_prop)
if bmd_dict:
bdm_v2_list.append(bmd_dict)
return bdm_v2_list
def _build_nics(self, networks):
if not networks:
return None
nics = []
for net_data in networks:
nic_info = {}
net_identifier = (net_data.get(self.NETWORK_UUID) or
net_data.get(self.NETWORK_ID))
if net_identifier:
if self.is_using_neutron():
net_id = (self.client_plugin(
'neutron').resolve_network(
net_data, self.NETWORK_ID, self.NETWORK_UUID))
else:
net_id = (self.client_plugin(
'nova').get_nova_network_id(net_identifier))
nic_info['net-id'] = net_id
if net_data.get(self.NETWORK_FIXED_IP):
ip = net_data[self.NETWORK_FIXED_IP]
if netutils.is_valid_ipv6(ip):
nic_info['v6-fixed-ip'] = ip
else:
nic_info['v4-fixed-ip'] = ip
if net_data.get(self.NETWORK_PORT):
nic_info['port-id'] = net_data[self.NETWORK_PORT]
nics.append(nic_info)
return nics
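    # Illustrative note (assumed example): a network entry resolving to a
    # neutron network with an IPv4 fixed_ip yields an element like
    # {'net-id': '<network-uuid>', 'v4-fixed-ip': '10.0.0.5'}, while a
    # port-only entry reduces to {'port-id': '<port-uuid>'}.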
def _add_port_for_address(self, server):
"""Method adds port id to list of addresses.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(server.addresses)
ifaces = server.interface_list()
ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
iface.mac_addr), iface.port_id)
for iface in ifaces)
for net_name in nets:
for addr in nets[net_name]:
addr['port'] = ip_mac_mapping_on_port_id.get(
(addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
return self._extend_networks(nets)
def _extend_networks(self, networks):
"""Method adds same networks with replaced name on network id.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(networks)
for key in list(nets.keys()):
try:
net_id = self.client_plugin().get_net_id_by_label(key)
except (exception.NovaNetworkNotFound,
exception.PhysicalResourceNameAmbiguity):
net_id = None
if net_id:
nets[net_id] = nets[key]
return nets
def _resolve_attribute(self, name):
if name == self.FIRST_ADDRESS:
return self.client_plugin().server_to_ipaddress(
self.resource_id) or ''
if name == self.NAME_ATTR:
return self._server_name()
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return ''
if name == self.ADDRESSES:
return self._add_port_for_address(server)
if name == self.NETWORKS_ATTR:
return self._extend_networks(server.networks)
if name == self.INSTANCE_NAME:
return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None)
if name == self.ACCESSIPV4:
return server.accessIPv4
if name == self.ACCESSIPV6:
return server.accessIPv6
if name == self.CONSOLE_URLS:
return self.client_plugin('nova').get_console_urls(server)
def add_dependencies(self, deps):
super(Server, self).add_dependencies(deps)
# Depend on any Subnet in this template with the same
# network_id as the networks attached to this server.
# It is not known which subnet a server might be assigned
# to so all subnets in a network should be created before
# the servers in that network.
nets = self.properties[self.NETWORKS]
if not nets:
return
for res in six.itervalues(self.stack):
if res.has_interface('OS::Neutron::Subnet'):
subnet_net = (res.properties.get(subnet.Subnet.NETWORK_ID)
or res.properties.get(subnet.Subnet.NETWORK))
for net in nets:
                    # Check the server's network id/uuid because either
                    # could refer to the same network the subnet belongs
                    # to, and that network may have been created by this
                    # stack. Regardless, the server should still wait on
                    # the subnet.
net_id = (net.get(self.NETWORK_ID) or
net.get(self.NETWORK_UUID))
if net_id and net_id == subnet_net:
deps += (self, res)
break
def _get_network_matches(self, old_networks, new_networks):
        # normalize new_networks entries into the same form as old_networks
for new_net in new_networks:
for key in ('port', 'network', 'fixed_ip', 'uuid'):
# if new_net.get(key) is '', convert to None
if not new_net.get(key):
new_net[key] = None
for old_net in old_networks:
for key in ('port', 'network', 'fixed_ip', 'uuid'):
# if old_net.get(key) is '', convert to None
if not old_net.get(key):
old_net[key] = None
# find matches and remove them from old and new networks
not_updated_networks = []
for net in old_networks:
if net in new_networks:
new_networks.remove(net)
not_updated_networks.append(net)
for net in not_updated_networks:
old_networks.remove(net)
return not_updated_networks
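    # Worked example (hypothetical values): with old_networks=[A, B] and
    # new_networks=[B, C], the method returns [B] and leaves
    # old_networks=[A] (to detach) and new_networks=[C] (to attach).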
def _get_network_id(self, net):
net_id = None
if net.get(self.NETWORK_ID):
if self.is_using_neutron():
net_id = self.client_plugin(
'neutron').resolve_network(
net,
self.NETWORK_ID, self.NETWORK_UUID)
else:
net_id = self.client_plugin(
'nova').get_nova_network_id(net.get(self.NETWORK_ID))
return net_id
def update_networks_matching_iface_port(self, nets, interfaces):
def find_equal(port, net_id, ip, nets):
for net in nets:
if (net.get('port') == port or
(net.get('fixed_ip') == ip and
(self._get_network_id(net) == net_id or
net.get('uuid') == net_id))):
return net
def find_poor_net(net_id, nets):
for net in nets:
if (not net.get('port') and not net.get('fixed_ip') and
(self._get_network_id(net) == net_id or
net.get('uuid') == net_id)):
return net
for iface in interfaces:
# get interface properties
props = {'port': iface.port_id,
'net_id': iface.net_id,
'ip': iface.fixed_ips[0]['ip_address'],
'nets': nets}
# try to match by port or network_id with fixed_ip
net = find_equal(**props)
if net is not None:
net['port'] = props['port']
continue
# find poor net that has only network_id
net = find_poor_net(props['net_id'], nets)
if net is not None:
net['port'] = props['port']
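    # In short: for every existing interface, try to find the template
    # network entry it corresponds to (by port, by fixed_ip plus network,
    # or failing that any "poor" entry that only names a network) and
    # record the real port id on it, so later attach/detach diffs line up
    # with what nova actually has.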
def _update_flavor(self, prop_diff):
flavor_update_policy = (
prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
self.properties[self.FLAVOR_UPDATE_POLICY])
flavor = prop_diff[self.FLAVOR]
if flavor_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
flavor_id = self.client_plugin().get_flavor_id(flavor)
handler_args = {'args': (flavor_id,)}
checker_args = {'args': (flavor_id, flavor)}
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
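    # The two progress objects above drive nova's two-step resize: the
    # 'resize' call moves the server to the new flavor, and the
    # 'verify_resize' call confirms the resize once nova reports it as
    # finished.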
def _update_image(self, prop_diff):
image_update_policy = (
prop_diff.get(self.IMAGE_UPDATE_POLICY) or
self.properties[self.IMAGE_UPDATE_POLICY])
if image_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
image = prop_diff[self.IMAGE]
image_id = self.client_plugin('glance').get_image_id(image)
preserve_ephemeral = (
image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
password = (prop_diff.get(self.ADMIN_PASS) or
self.properties[self.ADMIN_PASS])
kwargs = {'password': password,
'preserve_ephemeral': preserve_ephemeral}
prg = progress.ServerUpdateProgress(self.resource_id,
'rebuild',
handler_extra={'args': (image_id,),
'kwargs': kwargs})
return prg
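    # A single progress object is enough here: nova's rebuild call takes
    # the new image id plus the admin password, and optionally preserves
    # the ephemeral disk when the policy is REBUILD_PRESERVE_EPHEMERAL.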
def _update_networks(self, server, prop_diff):
updaters = []
new_networks = prop_diff.get(self.NETWORKS)
attach_first_free_port = False
if not new_networks:
new_networks = []
attach_first_free_port = True
old_networks = self.properties[self.NETWORKS]
if not server:
server = self.client().servers.get(self.resource_id)
interfaces = server.interface_list()
        # if old_networks is None, the server was booted with the first
        # free port, so we should detach these interfaces.
if old_networks is None:
for iface in interfaces:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args': (iface.port_id,)})
)
# if we have any information in networks field, we should:
# 1. find similar networks, if they exist
# 2. remove these networks from new_networks and old_networks
# lists
# 3. detach unmatched networks, which were present in old_networks
# 4. attach unmatched networks, which were present in new_networks
else:
            # remove unchanged networks from the old and new network
            # lists, and keep them in not_updated_networks
not_updated_networks = self._get_network_matches(
old_networks, new_networks)
self.update_networks_matching_iface_port(
old_networks + not_updated_networks, interfaces)
            # according to the nova interface-detach behaviour, a
            # detached port will be deleted
for net in old_networks:
if net.get(self.NETWORK_PORT):
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args':
(net.get(self.NETWORK_PORT),)})
)
handler_kwargs = {'port_id': None, 'net_id': None, 'fip': None}
        # the attach logic below is the same for both cases handled above
for net in new_networks:
if net.get(self.NETWORK_PORT):
handler_kwargs['port_id'] = net.get(self.NETWORK_PORT)
elif net.get(self.NETWORK_ID):
handler_kwargs['net_id'] = self._get_network_id(net)
handler_kwargs['fip'] = net.get('fixed_ip')
elif net.get(self.NETWORK_UUID):
handler_kwargs['net_id'] = net['uuid']
handler_kwargs['fip'] = net.get('fixed_ip')
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True,
handler_extra={'kwargs': handler_kwargs})
)
# if new_networks is None, we should attach first free port,
# according to similar behavior during instance creation
if attach_first_free_port:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True)
)
return updaters
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata_set(tmpl_diff['Metadata'])
updaters = []
server = None
if self.METADATA in prop_diff:
server = self.client().servers.get(self.resource_id)
self.client_plugin().meta_update(server,
prop_diff[self.METADATA])
if self.FLAVOR in prop_diff:
updaters.extend(self._update_flavor(prop_diff))
if self.IMAGE in prop_diff:
updaters.append(self._update_image(prop_diff))
elif self.ADMIN_PASS in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
server.change_password(prop_diff[self.ADMIN_PASS])
if self.NAME in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
self.client_plugin().rename(server, prop_diff[self.NAME])
if self.NETWORKS in prop_diff:
updaters.extend(self._update_networks(server, prop_diff))
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
def check_update_complete(self, updaters):
'''Push all updaters to completion in list order.'''
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
return all(prg.complete for prg in updaters)
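    # Each updater is a tiny state machine: its handler is invoked exactly
    # once (the 'called' flag), then its checker is polled until it reports
    # completion; only when every updater is complete does the update as a
    # whole finish.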
def metadata_update(self, new_metadata=None):
'''
Refresh the metadata if new_metadata is None
'''
if new_metadata is None:
# Re-resolve the template metadata and merge it with the
# current resource metadata. This is necessary because the
# attributes referenced in the template metadata may change
# and the resource itself adds keys to the metadata which
# are not specified in the template (e.g the deployments data)
meta = self.metadata_get(refresh=True) or {}
tmpl_meta = self.t.metadata()
meta.update(tmpl_meta)
self.metadata_set(meta)
@staticmethod
def _check_maximum(count, maximum, msg):
'''
Check a count against a maximum, unless maximum is -1 which indicates
that there is no limit
'''
if maximum != -1 and count > maximum:
raise exception.StackValidationFailed(message=msg)
def _validate_block_device_mapping(self):
# either volume_id or snapshot_id needs to be specified, but not both
# for block device mapping.
bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or []
bootable_vol = False
for mapping in bdm:
device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
if device_name == 'vda':
bootable_vol = True
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is not None and snapshot_id is not None:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is None and snapshot_id is None:
msg = _('Either volume_id or snapshot_id must be specified for'
' device mapping %s') % device_name
raise exception.StackValidationFailed(message=msg)
bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or []
if bdm and bdm_v2:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2)
for mapping in bdm_v2:
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE_ID)
swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
property_tuple = (volume_id, snapshot_id, image_id, swap_size)
if property_tuple.count(None) < 3:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
self.BLOCK_DEVICE_MAPPING_IMAGE_ID,
self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
if property_tuple.count(None) == 4:
msg = _('Either volume_id, snapshot_id, image_id or '
'swap_size must be specified.')
raise exception.StackValidationFailed(message=msg)
if any((volume_id, snapshot_id, image_id)):
bootable_vol = True
return bootable_vol
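    # Validation example (hypothetical template): a v2 mapping that sets
    # both volume_id and snapshot_id trips ResourcePropertyConflict, while
    # one that sets none of volume_id/snapshot_id/image_id/swap_size fails
    # with "Either volume_id, snapshot_id, image_id or swap_size must be
    # specified."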
def _validate_network(self, network):
if (network.get(self.NETWORK_ID) is None
and network.get(self.NETWORK_PORT) is None
and network.get(self.NETWORK_UUID) is None):
msg = _('One of the properties "%(id)s", "%(port_id)s", '
'"%(uuid)s" should be set for the '
'specified network of server "%(server)s".'
'') % dict(id=self.NETWORK_ID,
port_id=self.NETWORK_PORT,
uuid=self.NETWORK_UUID,
server=self.name)
raise exception.StackValidationFailed(message=msg)
if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
'to the network "%(network)s" for the server '
'"%(server)s". The "%(uuid)s" property is deprecated. '
'Use only "%(id)s" property.'
'') % dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name)
raise exception.StackValidationFailed(message=msg)
elif network.get(self.NETWORK_UUID):
LOG.info(_LI('For the server "%(server)s" the "%(uuid)s" '
'property is set to network "%(network)s". '
'"%(uuid)s" property is deprecated. Use '
'"%(id)s" property instead.'),
dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name))
def validate(self):
'''
Validate any of the provided params
'''
super(Server, self).validate()
bootable_vol = self._validate_block_device_mapping()
# make sure the image exists if specified.
image = self.properties[self.IMAGE]
if not image and not bootable_vol:
msg = _('Neither image nor bootable volume is specified for'
' instance %s') % self.name
raise exception.StackValidationFailed(message=msg)
# network properties 'uuid' and 'network' shouldn't be used
# both at once for all networks
networks = self.properties[self.NETWORKS] or []
# record if any networks include explicit ports
networks_with_port = False
for network in networks:
networks_with_port = (networks_with_port or
network.get(self.NETWORK_PORT))
self._validate_network(network)
# retrieve provider's absolute limits if it will be needed
metadata = self.properties[self.METADATA]
personality = self.properties[self.PERSONALITY]
if metadata is not None or personality:
limits = self.client_plugin().absolute_limits()
        # if 'security_groups' is present for the server and an explicit
        # 'port' is set in one or more entries in 'networks', raise a
        # validation error
if networks_with_port and self.properties[self.SECURITY_GROUPS]:
raise exception.ResourcePropertyConflict(
self.SECURITY_GROUPS,
"/".join([self.NETWORKS, self.NETWORK_PORT]))
# verify that the number of metadata entries is not greater
# than the maximum number allowed in the provider's absolute
# limits
if metadata is not None:
msg = _('Instance metadata must not contain greater than %s '
'entries. This is the maximum number allowed by your '
'service provider') % limits['maxServerMeta']
self._check_maximum(len(metadata),
limits['maxServerMeta'], msg)
# verify the number of personality files and the size of each
# personality file against the provider's absolute limits
if personality:
msg = _("The personality property may not contain "
"greater than %s entries.") % limits['maxPersonality']
self._check_maximum(len(personality),
limits['maxPersonality'], msg)
for path, contents in personality.items():
msg = (_("The contents of personality file \"%(path)s\" "
"is larger than the maximum allowed personality "
"file size (%(max_size)s bytes).") %
{'path': path,
'max_size': limits['maxPersonalitySize']})
self._check_maximum(len(bytes(contents.encode('utf-8'))),
limits['maxPersonalitySize'], msg)
def _delete_temp_url(self):
object_name = self.data().get('metadata_object_name')
if not object_name:
return
try:
container = self.physical_resource_name()
swift = self.client('swift')
swift.delete_object(container, object_name)
headers = swift.head_container(container)
if int(headers['x-container-object-count']) == 0:
swift.delete_container(container)
except Exception as ex:
self.client_plugin('swift').ignore_not_found(ex)
def _delete_queue(self):
queue_id = self.data().get('metadata_queue_id')
if not queue_id:
return
client_plugin = self.client_plugin('zaqar')
zaqar = client_plugin.create_for_tenant(
self.stack.stack_user_project_id)
try:
zaqar.queue(queue_id).delete()
except Exception as ex:
client_plugin.ignore_not_found(ex)
self.data_delete('metadata_queue_id')
def handle_snapshot_delete(self, state):
if state[0] != self.FAILED:
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
return progress.ServerDeleteProgress(
self.resource_id, image_id, False)
return self.handle_delete()
def handle_delete(self):
if self.resource_id is None:
return
if self.user_data_software_config():
self._delete_user()
self._delete_temp_url()
self._delete_queue()
try:
self.client().servers.delete(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return
return progress.ServerDeleteProgress(self.resource_id)
def check_delete_complete(self, prg):
if not prg:
return True
if not prg.image_complete:
image = self.client().images.get(prg.image_id)
if image.status in ('DELETED', 'ERROR'):
raise exception.Error(image.status)
elif image.status == 'ACTIVE':
prg.image_complete = True
if not self.handle_delete():
return True
return False
return self.client_plugin().check_delete_server_complete(
prg.server_id)
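    # Deletion flow in the methods above: handle_snapshot_delete first
    # snapshots the server to an image (unless the server is FAILED);
    # check_delete_complete waits for that image to go ACTIVE, then issues
    # the real delete and polls nova until the server is gone.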
def handle_suspend(self):
'''
Suspend a server - note we do not wait for the SUSPENDED state,
this is polled for by check_suspend_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been suspended successfully,
            # there is no need to suspend it again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s' % self.resource_id)
server.suspend()
return server.id
def check_suspend_complete(self, server_id):
cp = self.client_plugin()
server = cp.fetch_server(server_id)
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s'
% {'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
exc = resource.ResourceUnknownStatus(
result=_('Suspend of server %s failed') % server.name,
resource_status=status)
raise exc
def handle_resume(self):
'''
Resume a server - note we do not wait for the ACTIVE state,
this is polled for by check_resume_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been resumed successfully,
            # there is no need to resume it again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s' % self.resource_id)
server.resume()
return server.id
def check_resume_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_snapshot(self):
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
self.data_set('snapshot_image_id', image_id)
return image_id
def check_snapshot_complete(self, image_id):
image = self.client().images.get(image_id)
if image.status == 'ACTIVE':
return True
elif image.status == 'ERROR' or image.status == 'DELETED':
raise exception.Error(image.status)
return False
def handle_delete_snapshot(self, snapshot):
image_id = snapshot['resource_data'].get('snapshot_image_id')
try:
self.client().images.delete(image_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
def handle_restore(self, defn, restore_data):
image_id = restore_data['resource_data']['snapshot_image_id']
props = function.resolve(self.properties.data)
props[self.IMAGE] = image_id
return defn.freeze(properties=props)
def resource_mapping():
return {
'OS::Nova::Server': Server,
}
| apache-2.0 | -4,687,122,793,121,443,000 | 40.828829 | 79 | 0.53683 | false | 4.502771 | true | false | false |
line72/subte | libsubte/interface/StopMarker.py | 1 | 9828 | #
# Copyright (C) 2012 - Marcus Dillavou
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import math
import weakref
from gi.repository import Gtk, Champlain, Clutter, GLib
import libsubte
import shapes
class StopMarker(Champlain.CustomMarker):
def __init__(self, gtmap, stop):
Champlain.CustomMarker.__init__(self)
self._gtmap = None
self.gtmap = gtmap
self._stop = None
self.stop = stop
self.full_picture_box = None
self.unselected_color = Clutter.Color.new(0xf0, 0x02, 0xf0, 0xbb)
self.picture_color = Clutter.Color.new(0xef, 0xe4, 0x35, 0xbb)
self.modified_color = Clutter.Color.new(0xff, 0x10, 0x28, 0xbb)
self.route_color = Clutter.Color.new(0x0d, 0x9a, 0x27, 0xbb)
self.selected_color = Clutter.Color.new(0xfd, 0xfd, 0x02, 0xbb)
# draw our clickable marker
self.marker = Clutter.Actor()
self.marker.set_background_color(self.unselected_color)
self.marker.set_size(16, 16)
self.marker.set_position(0, 0)
self.marker.set_anchor_point(8, 8)
self.marker.set_reactive(True)
self.add_actor(self.marker)
self.marker.show()
self._visible = False
self.set_location(self.stop.latitude, self.stop.longitude)
# trying to capture it, then make us emit a signal doesn't
# seem to be working
#!lukstafi -- changed button-release to button-press
# and uncommented next line
self.marker.connect('button-press-event', self.on_click)
self.set_reactive(False)
@property
def gtmap(self):
if self._gtmap:
return self._gtmap()
return None
@gtmap.setter
def gtmap(self, m):
if m:
self._gtmap = weakref.ref(m)
else:
self._gtmap = None
@property
def stop(self):
if self._stop:
return self._stop()
return None
@stop.setter
def stop(self, m):
if m:
self._stop = weakref.ref(m)
else:
self._stop = None
def selected(self, status):
if status:
self.marker.set_background_color(self.selected_color)
else:
self.marker.set_background_color(self.unselected_color)
return True
def clicked(self, status):
print 'StopMarker.clicked status=', status
if status == self._visible: # nothing to do here
return True
if status:
self.show()
else:
self.hide()
return True
def on_click(self, actor, event, user_data = None):
#!mwd - this doesn't work :(
print 'StopMarker.on_click (no emitting)', actor, event
#!lukstafi - commented out
#self.emit('button-press-event', event)
#!lukstafi - instead of signals we self-call and invoke the hook
self.clicked(True)
if libsubte.Stop.activate_stop_hook:
libsubte.Stop.activate_stop_hook(self.stop)
return True
def on_expand_picture(self, actor, event, picture):
self.full_picture_box = Clutter.Texture()
self.full_picture_box.set_from_file(picture.image)
self.full_picture_box.set_keep_aspect_ratio(True)
size = self.gtmap.get_allocated_width(), self.gtmap.get_allocated_height()
r1 = size[0] / float(size[1])
size2 = self.full_picture_box.get_base_size()
if picture.orientation == 0 or picture.orientation == 180:
r2 = size2[0] / float(size2[1])
else:
r2 = size2[1] / float(size2[0])
self.full_picture_box.set_position(0, 0)
self.full_picture_box.set_z_rotation_from_gravity(picture.orientation, Clutter.Gravity.CENTER)
if r1 > r2: # use width
w = size[1] * r2
h = size[1]
else: # use height
w = size[0]
h = size[0] / r2
if picture.orientation != 0 and picture.orientation != 180:
w, h = h, w # reverse
self.full_picture_box.set_size(w, h)
self.full_picture_box.set_reactive(True)
#!lukstafi -- changed button-release to button-press
self.full_picture_box.connect('button-press-event', self.on_close_picture)
self.full_picture_box.show_all()
self.gtmap.show_image(self.full_picture_box)
return False
def on_close_picture(self, actor, event):
if self.full_picture_box:
self.gtmap.remove_image(self.full_picture_box)
self.full_picture_box.hide_all()
self.full_picture_box = None
return False
def show(self):
self.gtmap.unshow_stop_info()
width = 500
height = 200
# our meta info
group = Clutter.Group()
group.set_position(8, -8)
group.set_anchor_point(width / 2, height)
# just drawn a rectange or something
rect = shapes.Bubble()
c = Clutter.Color.new(0xde, 0xde, 0xde, 0xfe)
rect.set_color(c)
rect.set_has_outline(True)
rect.set_outline_color(Clutter.Color.new(0x00, 0x00, 0x00, 0xff))
rect.set_size(width, height)
rect.set_position(0, 8)
rect.set_anchor_point(0, 0)
rect.set_has_shadow(True)
group.add_child(rect)
name = Clutter.Text()
if self.stop.name:
            name.set_markup('<markup><b>%s</b></markup>' % self.stop.name.replace('&', '&amp;'))
else:
name.set_markup('<markup><b>%s</b></markup>' % self.stop.stop_id)
name.set_size(400, 25)
name.set_position(10, 15)
name.set_anchor_point(0, 0)
group.add_child(name)
info = Clutter.Text()
info.set_use_markup(True)
info.set_text('')
info.set_size(200, 75)
info.set_position(10, 50)
info.set_anchor_point(0, 0)
group.add_child(info)
info.set_markup('<markup><b>Latitude:</b> %s\n<b>Longitude:</b> %s</markup>' % (self.stop.latitude, self.stop.longitude))
routes = Clutter.Text()
if len(self.stop.trip_routes) > 0:
route_names = ', '.join([x.route.short_name for x in self.stop.trip_routes])
else:
route_names = 'None'
routes.set_markup('<markup><b>Routes:</b> %s</markup>' % route_names)
routes.set_size(200, 75)
routes.set_position(10, 100)
routes.set_anchor_point(0, 0)
group.add_child(routes)
# see if we have a picture (or more)
if len(self.stop.pictures) > 0:
try:
picture_box = Clutter.Texture()
# just use the first picture for now
picture = self.stop.pictures[0]
if picture.thumbnail:
picture_box.set_from_file(picture.thumbnail)
else:
picture_box.set_from_file(picture.image)
w, h = picture_box.get_base_size()
picture_box.set_keep_aspect_ratio(True)
picture_box.set_anchor_point(0, 0)
if picture.orientation in (90, -90):
                    #!mwd - I have no idea how the fuck clutter is rotating this
# It seems as though the bounding box doesn't change
# so I'm just making up some position numbers
picture_box.set_width(100)
picture_box.set_position(width - ((h/w) * 100) - (w/2) - 45, 60)
picture_box.set_z_rotation_from_gravity(picture.orientation, Clutter.Gravity.CENTER)
else:
picture_box.set_height(100)
picture_box.set_position(width - ((w/h) * 100) - (w/2) - 25, 50)
#!lukstafi -- changed button-release to button-press
picture_box.connect('button-press-event', self.on_expand_picture, picture)
picture_box.set_reactive(True)
group.add_child(picture_box)
except GLib.GError, e:
print >> sys.stderr, 'Error loading image', e
self.gtmap.show_popup(self, group)
self._visible = True
def hide(self):
self.gtmap.unshow_popup(self)
self._visible = False
self._update_color()
def update(self):
self._update_color()
if self._visible:
self.show()
def _update_color(self):
if self.stop:
if len(self.stop.trip_routes) > 0:
# we have routes associated with us
self.marker.set_background_color(self.route_color)
return
elif len(self.stop.pictures) > 0:
if self.stop.name != None and len(self.stop.name) > 0:
# picture and we have a name
self.marker.set_background_color(self.modified_color)
else:
# we have picture associated with us, but no name
self.marker.set_background_color(self.picture_color)
return
# default color
self.marker.set_background_color(self.unselected_color)
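    # Colour legend used above: green (route_color) when the stop is on at
    # least one route, red (modified_color) when it has both a picture and
    # a name, yellow (picture_color) when it only has a picture, and the
    # default unselected_color otherwise.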
| gpl-3.0 | -5,898,807,832,328,061,000 | 32.889655 | 129 | 0.577941 | false | 3.621223 | false | false | false |
jalabort/ijcv-2014-aam | aam/image/test/image_test.py | 1 | 18144 | import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from nose.tools import raises
from menpo.testing import is_same_array
from menpo.image import BooleanImage, MaskedImage, Image
@raises(ValueError)
def test_create_1d_error():
Image(np.ones(1))
def test_image_n_elements():
image = Image(np.ones((10, 10, 3)))
assert(image.n_elements == 10 * 10 * 3)
def test_image_width():
image = Image(np.ones((6, 4, 3)))
assert(image.width == 4)
def test_image_height():
image = Image(np.ones((6, 4, 3)))
assert(image.height == 6)
def test_image_blank():
image = Image(np.zeros((6, 4, 1)))
image_blank = Image.blank((6, 4))
assert(np.all(image_blank.pixels == image.pixels))
def test_image_blank_fill():
image = Image(np.ones((6, 4, 1)) * 7)
image_blank = Image.blank((6, 4), fill=7)
assert(np.all(image_blank.pixels == image.pixels))
def test_image_blank_n_channels():
image = Image(np.zeros((6, 4, 7)))
image_blank = Image.blank((6, 4), n_channels=7)
assert(np.all(image_blank.pixels == image.pixels))
def test_image_centre():
pixels = np.ones((10, 20, 1))
image = Image(pixels)
assert(np.all(image.centre == np.array([5, 10])))
def test_image_str_shape_4d():
pixels = np.ones((10, 20, 11, 12, 1))
image = Image(pixels)
assert(image._str_shape == '10 x 20 x 11 x 12')
def test_image_str_shape_2d():
pixels = np.ones((10, 20, 1))
image = Image(pixels)
assert(image._str_shape == '20W x 10H')
def test_image_as_vector():
pixels = np.random.rand(10, 20, 1)
image = Image(pixels)
assert(np.all(image.as_vector() == pixels.ravel()))
def test_image_as_vector_keep_channels():
pixels = np.random.rand(10, 20, 2)
image = Image(pixels)
assert(np.all(image.as_vector(keep_channels=True) ==
pixels.reshape([-1, 2])))
def test_image_from_vector():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel())
assert(np.all(image2.pixels == pixels2))
def test_image_from_vector_custom_channels():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 3)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), n_channels=3)
assert(np.all(image2.pixels == pixels2))
def test_image_from_vector_no_copy():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), copy=False)
assert(is_same_array(image2.pixels, pixels2))
def test_image_from_vector_inplace_no_copy():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel(), copy=False)
assert(is_same_array(image.pixels, pixels2))
def test_image_from_vector_inplace_no_copy_warning():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
image.from_vector_inplace(pixels2.ravel()[::-1], copy=False)
assert len(w) == 1
def test_image_from_vector_inplace_copy_default():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel())
assert(not is_same_array(image.pixels, pixels2))
def test_image_from_vector_inplace_copy_explicit():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel(), copy=True)
assert(not is_same_array(image.pixels, pixels2))
def test_image_from_vector_custom_channels_no_copy():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 3)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), n_channels=3, copy=False)
assert(is_same_array(image2.pixels, pixels2))
@raises(ValueError)
def test_boolean_image_wrong_round():
BooleanImage.blank((12, 12), round='ads')
def test_boolean_image_proportion_true():
image = BooleanImage.blank((10, 10))
image.pixels[:7] = False
assert(image.proportion_true == 0.3)
def test_boolean_image_proportion_false():
image = BooleanImage.blank((10, 10))
image.pixels[:7] = False
assert(image.proportion_false == 0.7)
def test_boolean_image_proportion_sums():
image = BooleanImage.blank((10, 10))
image.pixels[:7] = False
assert(image.proportion_true + image.proportion_false == 1)
def test_boolean_image_false_indices():
image = BooleanImage.blank((2, 3))
image.pixels[0, 1] = False
image.pixels[1, 2] = False
assert(np.all(image.false_indices == np.array([[0, 1],
[1, 2]])))
def test_boolean_image_str():
image = BooleanImage.blank((2, 3))
assert(image.__str__() == '3W x 2H 2D mask, 100.0% of which is True')
def test_boolean_image_from_vector():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.blank((4, 4))
image2 = image.from_vector(vector)
assert(np.all(image2.as_vector() == vector))
def test_boolean_image_from_vector_no_copy():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.blank((4, 4))
image2 = image.from_vector(vector, copy=False)
assert(is_same_array(image2.pixels.ravel(), vector))
def test_boolean_image_from_vector_no_copy_raises():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.blank((4, 4))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
image.from_vector(vector[::-1], copy=False)
assert len(w) == 1
def test_boolean_image_invert_inplace():
image = BooleanImage.blank((4, 4))
image.invert_inplace()
assert(np.all(image.pixels == False))
def test_boolean_image_invert_inplace_double_noop():
image = BooleanImage.blank((4, 4))
image.invert_inplace()
image.invert_inplace()
assert(np.all(image.pixels == True))
def test_boolean_image_invert():
image = BooleanImage.blank((4, 4))
image2 = image.invert()
assert(np.all(image.pixels == True))
assert(np.all(image2.pixels == False))
def test_boolean_bounds_false():
mask = BooleanImage.blank((8, 8), fill=True)
mask.pixels[1, 2] = False
mask.pixels[5, 4] = False
mask.pixels[3:2, 3] = False
min_b, max_b = mask.bounds_false()
assert(np.all(min_b == np.array([1, 2])))
assert(np.all(max_b == np.array([5, 4])))
@raises(ValueError)
def test_boolean_prevent_order_kwarg():
mask = BooleanImage.blank((8, 8), fill=True)
mask.warp_to(mask, None, order=4)
def test_create_image_copy_false():
pixels = np.ones((100, 100, 1))
image = Image(pixels, copy=False)
assert (is_same_array(image.pixels, pixels))
def test_create_image_copy_true():
pixels = np.ones((100, 100, 1))
image = Image(pixels)
assert (not is_same_array(image.pixels, pixels))
def test_create_image_copy_false_not_c_contiguous():
pixels = np.ones((100, 100, 1), order='F')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
Image(pixels, copy=False)
assert(len(w) == 1)
def mask_image_3d_test():
mask_shape = (120, 121, 13)
mask_region = np.ones(mask_shape)
return BooleanImage(mask_region)
def test_mask_creation_basics():
mask_shape = (120, 121, 3)
mask_region = np.ones(mask_shape)
mask = BooleanImage(mask_region)
assert_equal(mask.n_channels, 1)
assert_equal(mask.n_dims, 3)
assert_equal(mask.shape, mask_shape)
def test_mask_blank():
mask = BooleanImage.blank((56, 12, 3))
assert (np.all(mask.pixels))
def test_boolean_copy_false_boolean():
mask = np.zeros((10, 10), dtype=np.bool)
boolean_image = BooleanImage(mask, copy=False)
assert (is_same_array(boolean_image.pixels, mask))
def test_boolean_copy_true():
mask = np.zeros((10, 10), dtype=np.bool)
boolean_image = BooleanImage(mask)
assert (not is_same_array(boolean_image.pixels, mask))
def test_boolean_copy_false_non_boolean():
mask = np.zeros((10, 10))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
BooleanImage(mask, copy=False)
assert(len(w) == 1)
def test_mask_blank_rounding_floor():
mask = BooleanImage.blank((56.1, 12.1), round='floor')
assert_allclose(mask.shape, (56, 12))
def test_mask_blank_rounding_ceil():
mask = BooleanImage.blank((56.1, 12.1), round='ceil')
assert_allclose(mask.shape, (57, 13))
def test_mask_blank_rounding_round():
mask = BooleanImage.blank((56.1, 12.6), round='round')
assert_allclose(mask.shape, (56, 13))
def test_mask_blank_false_fill():
mask = BooleanImage.blank((56, 12, 3), fill=False)
assert (np.all(~mask.pixels))
def test_mask_n_true_n_false():
mask = BooleanImage.blank((64, 14), fill=False)
assert_equal(mask.n_true, 0)
assert_equal(mask.n_false, 64 * 14)
mask.mask[0, 0] = True
mask.mask[9, 13] = True
assert_equal(mask.n_true, 2)
assert_equal(mask.n_false, 64 * 14 - 2)
def test_mask_true_indices():
mask = BooleanImage.blank((64, 14, 51), fill=False)
mask.mask[0, 2, 5] = True
mask.mask[5, 13, 4] = True
true_indices = mask.true_indices
true_indices_test = np.array([[0, 2, 5], [5, 13, 4]])
assert_equal(true_indices, true_indices_test)
def test_mask_false_indices():
mask = BooleanImage.blank((64, 14, 51), fill=True)
mask.mask[0, 2, 5] = False
mask.mask[5, 13, 4] = False
false_indices = mask.false_indices
false_indices_test = np.array([[0, 2, 5], [5, 13, 4]])
assert_equal(false_indices, false_indices_test)
def test_mask_true_bounding_extent():
mask = BooleanImage.blank((64, 14, 51), fill=False)
mask.mask[0, 13, 5] = True
mask.mask[5, 2, 4] = True
tbe = mask.bounds_true()
true_extends_mins = np.array([0, 2, 4])
true_extends_maxs = np.array([5, 13, 5])
assert_equal(tbe[0], true_extends_mins)
assert_equal(tbe[1], true_extends_maxs)
def test_3channel_image_creation():
pixels = np.ones((120, 120, 3))
MaskedImage(pixels)
def test_no_channels_image_creation():
pixels = np.ones((120, 120))
MaskedImage(pixels)
def test_create_MaskedImage_copy_false_mask_array():
pixels = np.ones((100, 100, 1))
mask = np.ones((100, 100), dtype=np.bool)
image = MaskedImage(pixels, mask=mask, copy=False)
assert (is_same_array(image.pixels, pixels))
assert (is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_false_mask_BooleanImage():
pixels = np.ones((100, 100, 1))
mask = np.ones((100, 100), dtype=np.bool)
mask_image = BooleanImage(mask, copy=False)
image = MaskedImage(pixels, mask=mask_image, copy=False)
assert (is_same_array(image.pixels, pixels))
assert (is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_true_mask_array():
pixels = np.ones((100, 100))
mask = np.ones((100, 100), dtype=np.bool)
image = MaskedImage(pixels, mask=mask)
assert (not is_same_array(image.pixels, pixels))
assert (not is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_true_mask_BooleanImage():
pixels = np.ones((100, 100, 1))
mask = np.ones((100, 100), dtype=np.bool)
mask_image = BooleanImage(mask, copy=False)
image = MaskedImage(pixels, mask=mask_image, copy=True)
assert (not is_same_array(image.pixels, pixels))
assert (not is_same_array(image.mask.pixels, mask))
def test_2d_crop_without_mask():
pixels = np.ones((120, 120, 3))
im = MaskedImage(pixels)
cropped_im = im.crop([10, 50], [20, 60])
assert (cropped_im.shape == (10, 10))
assert (cropped_im.n_channels == 3)
assert (np.alltrue(cropped_im.shape))
def test_2d_crop_with_mask():
pixels = np.ones((120, 120, 3))
mask = np.zeros_like(pixels[..., 0])
mask[10:100, 20:30] = 1
im = MaskedImage(pixels, mask=mask)
cropped_im = im.crop([0, 0], [20, 60])
assert (cropped_im.shape == (20, 60))
assert (np.alltrue(cropped_im.shape))
def test_normalize_std_default():
pixels = np.ones((120, 120, 3))
pixels[..., 0] = 0.5
pixels[..., 1] = 0.2345
image = MaskedImage(pixels)
image.normalize_std_inplace()
assert_allclose(np.mean(image.pixels), 0, atol=1e-10)
assert_allclose(np.std(image.pixels), 1)
def test_normalize_norm_default():
pixels = np.ones((120, 120, 3))
pixels[..., 0] = 0.5
pixels[..., 1] = 0.2345
image = MaskedImage(pixels)
image.normalize_norm_inplace()
assert_allclose(np.mean(image.pixels), 0, atol=1e-10)
assert_allclose(np.linalg.norm(image.pixels), 1)
@raises(ValueError)
def test_normalize_std_no_variance_exception():
pixels = np.ones((120, 120, 3))
pixels[..., 0] = 0.5
pixels[..., 1] = 0.2345
image = MaskedImage(pixels)
image.normalize_std_inplace(mode='per_channel')
@raises(ValueError)
def test_normalize_norm_zero_norm_exception():
pixels = np.zeros((120, 120, 3))
image = MaskedImage(pixels)
image.normalize_norm_inplace(mode='per_channel')
def test_normalize_std_per_channel():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
image = MaskedImage(pixels)
image.normalize_std_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.std(image.as_vector(keep_channels=True), axis=0), 1)
def test_normalize_norm_per_channel():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
image = MaskedImage(pixels)
image.normalize_norm_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.linalg.norm(image.as_vector(keep_channels=True), axis=0), 1)
def test_normalize_std_masked():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
mask = np.zeros((120, 120))
mask[30:50, 20:30] = 1
image = MaskedImage(pixels, mask=mask)
image.normalize_std_inplace(mode='per_channel', limit_to_mask=True)
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.std(image.as_vector(keep_channels=True), axis=0), 1)
def test_normalize_norm_masked():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
mask = np.zeros((120, 120))
mask[30:50, 20:30] = 1
image = MaskedImage(pixels, mask=mask)
image.normalize_norm_inplace(mode='per_channel', limit_to_mask=True)
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.linalg.norm(image.as_vector(keep_channels=True), axis=0), 1)
def test_rescale_single_num():
image = MaskedImage(np.random.randn(120, 120, 3))
new_image = image.rescale(0.5)
assert_allclose(new_image.shape, (60, 60))
def test_rescale_tuple():
image = MaskedImage(np.random.randn(120, 120, 3))
new_image = image.rescale([0.5, 2.0])
assert_allclose(new_image.shape, (60, 240))
@raises(ValueError)
def test_rescale_negative():
image = MaskedImage(np.random.randn(120, 120, 3))
image.rescale([0.5, -0.5])
@raises(ValueError)
def test_rescale_negative_single_num():
image = MaskedImage(np.random.randn(120, 120, 3))
image.rescale(-0.5)
def test_rescale_boundaries_interpolation():
image = MaskedImage(np.random.randn(60, 60, 3))
for i in [x * 0.1 for x in range(1, 31)]:
image_rescaled = image.rescale(i)
assert_allclose(image_rescaled.mask.proportion_true, 1.0)
def test_resize():
image = MaskedImage(np.random.randn(120, 120, 3))
new_size = (250, 250)
new_image = image.resize(new_size)
assert_allclose(new_image.shape, new_size)
def test_as_greyscale_luminosity():
image = MaskedImage(np.ones([120, 120, 3]))
new_image = image.as_greyscale(mode='luminosity')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
def test_as_greyscale_average():
image = MaskedImage(np.ones([120, 120, 3]))
new_image = image.as_greyscale(mode='average')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
@raises(ValueError)
def test_as_greyscale_channels_no_index():
image = MaskedImage(np.ones([120, 120, 3]))
new_image = image.as_greyscale(mode='channel')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
def test_as_greyscale_channels():
image = MaskedImage(np.random.randn(120, 120, 3))
new_image = image.as_greyscale(mode='channel', channel=0)
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
assert_allclose(new_image.pixels[..., 0], image.pixels[..., 0])
def test_as_pil_image_1channel():
im = MaskedImage(np.random.randn(120, 120, 1))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
(im.pixels * 255).astype(np.uint8))
def test_as_pil_image_3channels():
im = MaskedImage(np.random.randn(120, 120, 3))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
(im.pixels * 255).astype(np.uint8))
def test_image_gradient_sanity():
# Only a sanity check - does it run and generate sensible output?
image = Image(np.zeros([120, 120, 3]))
new_image = image.gradient()
assert(type(new_image) == Image)
assert(new_image.shape == image.shape)
assert(new_image.n_channels == image.n_channels * 2)
| bsd-2-clause | 23,672,551,868,986,324 | 29.089552 | 76 | 0.641534 | false | 3.011452 | true | false | false |
nevins-b/lemur | lemur/plugins/lemur_openssl/plugin.py | 1 | 4304 | """
.. module: lemur.plugins.lemur_openssl.plugin
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
from io import open
import subprocess
from flask import current_app
from lemur.utils import mktempfile, mktemppath
from lemur.plugins.bases import ExportPlugin
from lemur.plugins import lemur_openssl as openssl
from lemur.common.utils import get_psuedo_random_string
def run_process(command):
"""
    Runs a given command with Popen and wraps some
error handling around it.
:param command:
:return:
"""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
current_app.logger.debug(command)
stdout, stderr = p.communicate()
if p.returncode != 0:
current_app.logger.debug(" ".join(command))
current_app.logger.error(stderr)
raise Exception(stderr)
def create_pkcs12(cert, chain, p12_tmp, key, alias, passphrase):
"""
    Creates a PKCS12 formatted file.
:param cert:
:param chain:
:param p12_tmp:
:param key:
:param alias:
:param passphrase:
"""
if isinstance(cert, bytes):
cert = cert.decode('utf-8')
if isinstance(chain, bytes):
chain = chain.decode('utf-8')
if isinstance(key, bytes):
key = key.decode('utf-8')
with mktempfile() as key_tmp:
with open(key_tmp, 'w') as f:
f.write(key)
# Create PKCS12 keystore from private key and public certificate
with mktempfile() as cert_tmp:
with open(cert_tmp, 'w') as f:
if chain:
f.writelines([cert.strip() + "\n", chain.strip() + "\n"])
else:
f.writelines([cert.strip() + "\n"])
run_process([
"openssl",
"pkcs12",
"-export",
"-name", alias,
"-in", cert_tmp,
"-inkey", key_tmp,
"-out", p12_tmp,
"-password", "pass:{}".format(passphrase)
])
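# The call above is roughly equivalent to running, on the command line:
#   openssl pkcs12 -export -name <alias> -in <cert file> -inkey <key file> \
#       -out <p12 file> -password pass:<passphrase>
# (the paths shown are placeholders for the temporary files created above).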
class OpenSSLExportPlugin(ExportPlugin):
title = 'OpenSSL'
slug = 'openssl-export'
    description = 'Is a loose interface to openssl and supports various formats'
version = openssl.VERSION
author = 'Kevin Glisson'
author_url = 'https://github.com/netflix/lemur'
options = [
{
'name': 'type',
'type': 'select',
'required': True,
'available': ['PKCS12 (.p12)'],
'helpMessage': 'Choose the format you wish to export',
},
{
'name': 'passphrase',
'type': 'str',
'required': False,
'helpMessage': 'If no passphrase is given one will be generated for you, we highly recommend this. Minimum length is 8.',
'validation': ''
},
{
'name': 'alias',
'type': 'str',
'required': False,
'helpMessage': 'Enter the alias you wish to use for the keystore.',
}
]
def export(self, body, chain, key, options, **kwargs):
"""
Generates a Java Keystore or Truststore
:param key:
:param chain:
:param body:
:param options:
:param kwargs:
"""
if self.get_option('passphrase', options):
passphrase = self.get_option('passphrase', options)
else:
passphrase = get_psuedo_random_string()
if self.get_option('alias', options):
alias = self.get_option('alias', options)
else:
alias = "blah"
type = self.get_option('type', options)
with mktemppath() as output_tmp:
if type == 'PKCS12 (.p12)':
if not key:
raise Exception("Private Key required by {0}".format(type))
create_pkcs12(body, chain, output_tmp, key, alias, passphrase)
extension = "p12"
else:
raise Exception("Unable to export, unsupported type: {0}".format(type))
with open(output_tmp, 'rb') as f:
raw = f.read()
return extension, passphrase, raw
| apache-2.0 | 4,822,535,974,338,497,000 | 28.278912 | 133 | 0.549954 | false | 4.014925 | false | false | false |
alextingle/autoclapper | autoclapper.py | 1 | 7762 | #! /usr/bin/env python
## Support for byteswapping audio streams (needed for AIFF format).
_typecode = {2:'h'}
def _init_typecode():
import array
for t in ('i', 'l'):
a = array.array(t)
if a.itemsize==4:
_typecode[4] = t
return
import sys
print "Can't find array typecode for 4 byte ints."
sys.exit(1)
_init_typecode()
def _byteswap(s,n):
"""Byteswap stream s, which is of width n bytes. Does nothing if n is 1.
Only supports widths listed in _typecode (2 & 4)."""
if n==1:
return s
import array
a = array.array( _typecode[n], s )
a.byteswap()
return a.tostring()
def _null(s,n):
"""Do nothing to stream s, which is of width n. See also: _byteswap(s,n)"""
return s
class SoundFile(object):
'''Wrapper for PCM sound stream, can be AIFF (aifc module)
or WAV (wave module).'''
def __init__(self, fname, template_obj=None):
if fname[-5:].lower() == '.aiff':
self._mod = __import__('aifc')
self._conv = _byteswap # AIFF is big-endian.
elif fname[-4:].lower() == '.wav':
self._mod = __import__('wave')
self._conv = _null
else:
print 'Unknown extension:', fname
import sys
sys.exit(1)
if template_obj:
# We will create & write to this file.
self.init_from_template(fname, template_obj)
else:
# We load from this file.
self.load(fname)
def bytes_per_frame(self):
return self.stream.getsampwidth() * self.stream.getnchannels()
def bytes_per_second(self):
return self.stream.getframerate() * self.bytes_per_frame()
def load(self, in_fname):
print 'load', self._mod.__name__, in_fname
self.stream = self._mod.open(in_fname, 'rb')
def read_lin(self):
fragment = self.stream.readframes( self.stream.getnframes() )
return self._conv(fragment, self.stream.getsampwidth())
def init_from_template(self, out_fname, template_obj):
print 'create', self._mod.__name__, out_fname
self.stream = self._mod.open(out_fname, 'wb')
self.stream.setnchannels( template_obj.stream.getnchannels() )
self.stream.setsampwidth( template_obj.stream.getsampwidth() )
self.stream.setframerate( template_obj.stream.getframerate() )
def write_lin(self, fragment):
self.stream.writeframes(self._conv(fragment, self.stream.getsampwidth()))
def close(self):
self.stream.close()
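# Minimal usage sketch for SoundFile (file names are hypothetical):
#   src = SoundFile('take1.wav')          # load an existing WAV/AIFF
#   pcm = src.read_lin()                  # raw little-endian PCM bytes
#   out = SoundFile('copy.wav', template_obj=src)
#   out.write_lin(pcm)
#   out.close()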
def coerce_lin(source_aiff, template_obj):
'''Read data from source, and convert it to match template's params.'''
import audioop
frag = source_aiff.read_lin()
Ss = source_aiff.stream
St = template_obj.stream
# Sample width
if Ss.getsampwidth() != St.getsampwidth():
print 'coerce sampwidth %i -> %i' %(Ss.getsampwidth(), St.getsampwidth())
frag = audioop.lin2lin(frag, Ss.getsampwidth(), St.getsampwidth())
width = St.getsampwidth()
# Channels
if Ss.getnchannels() != St.getnchannels():
print 'coerce nchannels %i -> %i' %(Ss.getnchannels(), St.getnchannels())
if Ss.getnchannels()==2 and St.getnchannels()==1:
frag = audioop.tomono(frag, width, 0.5, 0.5)
elif Ss.getnchannels()==1 and St.getnchannels()==2:
frag = audioop.tostereo(frag, width, 1.0, 1.0)
else:
print "Err: can't match channels"
# Frame rate
if Ss.getframerate() != St.getframerate():
print 'coerce framerate %i -> %i' %(Ss.getframerate(), St.getframerate())
frag,state = audioop.ratecv(
frag, width,
St.getnchannels(),
Ss.getframerate(), # in rate
St.getframerate(), # out rate
None, 2,1
)
return frag
def findfit(scratch_frag, final_frag, sound_file):
'''Calculates the offset (in seconds) between scratch_frag & final_frag.
Both fragments are assumed to contain the same, loud "clapper" event.
The SoundFile object is used for common stream parameters.'''
import audioop
nchannels = sound_file.stream.getnchannels()
framerate = sound_file.stream.getframerate()
width = sound_file.stream.getsampwidth()
assert(width==2)
# Simplify the sound streams to make it quicker to find a match.
# Left channel only.
if nchannels > 1:
scratch_frag_ = audioop.tomono(scratch_frag, width, 1, 0)
final_frag_ = audioop.tomono(final_frag, width, 1, 0)
else:
scratch_frag_ = scratch_frag
final_frag_ = final_frag
nchannels_ = 1
# Downsample to 8000/sec
framerate_ = 8000
scratch_frag_,state =\
audioop.ratecv(scratch_frag_, width, nchannels_, framerate, framerate_, None)
final_frag_,state =\
audioop.ratecv(final_frag_, width, nchannels_, framerate, framerate_, None)
bytes_per_second_ = nchannels_ * framerate_ * width
# Find the clapper in final
    length_samples = int(0.001 * framerate * nchannels_)  # short search window
final_off_samples = audioop.findmax(final_frag_, length_samples)
# Search for a 2 second 'needle' centred on where we found the 'clapper'
needle_bytes = 2 * bytes_per_second_
b0 = max(0, final_off_samples * width - int(needle_bytes/2))
print '"clapper" at final:', 1.0*b0/bytes_per_second_, 'sec'
b1 = b0 + needle_bytes
final_clapper_frag = final_frag_[b0:b1]
scratch_off_samples,factor = audioop.findfit(scratch_frag_, final_clapper_frag)
scratch_off_bytes = scratch_off_samples * width
print 'match at scratch:', 1.0*scratch_off_bytes/bytes_per_second_, 'sec', " factor =",factor
# Calculate the offset (shift) between the two fragments.
shift_sec = (scratch_off_bytes - b0) * 1.0 / bytes_per_second_
print 'shift =', shift_sec, 'seconds'
return shift_sec
def autoclapper(in_scratch_fname, in_final_fname, out_fname):
"""Read WAV- or AIFF-format files in_scratch_fname (a scratch audio track,
taken from a video) & in_final_fname (a final-quality audio track of
the same scene). Shift the 'final' stream to match the 'scratch' track,
and write it out to out_fname. The result is a file that can be used
directly as the video's sound-track."""
# Read in the input streams.
scratch = SoundFile( in_scratch_fname )
final = SoundFile( in_final_fname )
print 'scratch', scratch.stream.getparams()
print 'final ', final.stream.getparams()
scratch_frag = coerce_lin(scratch, final)
final_frag = final.read_lin()
## Shift final_frag to match scratch_frag
shift_sec = findfit(scratch_frag, final_frag, final)
shift_frames = int(shift_sec * final.stream.getframerate())
shift_bytes = shift_frames * final.bytes_per_frame()
print 'shift', shift_bytes, 'bytes'
if shift_bytes > 0:
final_frag = '\0' * shift_bytes + final_frag
elif shift_bytes < 0:
final_frag = final_frag[-shift_bytes:]
## Set final_frag length to match scratch_frag
if len(final_frag) > len(scratch_frag):
final_frag = final_frag[:len(scratch_frag)]
elif len(final_frag) < len(scratch_frag):
final_frag += '\0' * (len(scratch_frag) - len(final_frag))
# Write out the result.
sink = SoundFile( out_fname, final )
sink.write_lin( final_frag )
sink.close()
if __name__=='__main__':
import sys
if sys.argv[1] in ('-h', '--help', '-?'):
print 'syntax: python autoclapper.py IN_SCRATCH_FNAME IN_FINAL_FNAME OUT_FNAME'
print
print autoclapper.__doc__
print """
You can use "avconv" (or "ffmpeg") to extract audio tracks from video.
Example:
$ avconv -i raw_video.avi scratch.wav
$ python autoclapper.py scratch.wav raw_final.wav synced_final.wav
$ avconv -i raw_video.avi -i synced_final.wav -map 0:0 -map 1:0 -codec copy video.avi
"""
sys.exit(0)
in_scratch_fname = sys.argv[1]
in_final_fname = sys.argv[2]
out_fname = sys.argv[3]
autoclapper(in_scratch_fname, in_final_fname, out_fname)
| agpl-3.0 | 5,807,491,492,278,945,000 | 32.456897 | 95 | 0.652409 | false | 3.230129 | false | false | false |
oneconvergence/group-based-policy | gbpservice/neutron/services/grouppolicy/extension_manager.py | 1 | 11825 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.openstack.common import log
from oslo.config import cfg
import stevedore
LOG = log.getLogger(__name__)
class ExtensionManager(stevedore.named.NamedExtensionManager):
"""Manage extension drivers using drivers."""
def __init__(self):
# Ordered list of extension drivers, defining
# the order in which the drivers are called.
self.ordered_ext_drivers = []
LOG.info(_("Configured extension driver names: %s"),
cfg.CONF.group_policy.extension_drivers)
super(ExtensionManager, self).__init__(
'gbpservice.neutron.group_policy.extension_drivers',
cfg.CONF.group_policy.extension_drivers,
invoke_on_load=True,
name_order=True)
LOG.info(_("Loaded extension driver names: %s"), self.names())
self._register_drivers()
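    # Example configuration (driver name is hypothetical) that would feed
    # the names list used above:
    #   [group_policy]
    #   extension_drivers = my_extension_driver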
def _register_drivers(self):
"""Register all extension drivers.
This method should only be called once in the ExtensionManager
constructor.
"""
for ext in self:
self.ordered_ext_drivers.append(ext)
LOG.info(_("Registered extension drivers: %s"),
[driver.name for driver in self.ordered_ext_drivers])
def initialize(self):
# Initialize each driver in the list.
for driver in self.ordered_ext_drivers:
LOG.info(_("Initializing extension driver '%s'"), driver.name)
driver.obj.initialize()
def extension_aliases(self):
exts = []
for driver in self.ordered_ext_drivers:
alias = driver.obj.extension_alias
exts.append(alias)
LOG.info(_("Got %(alias)s extension from driver '%(drv)s'"),
{'alias': alias, 'drv': driver.name})
return exts
def _call_on_ext_drivers(self, method_name, session, data, result):
"""Helper method for calling a method across all extension drivers."""
for driver in self.ordered_ext_drivers:
try:
getattr(driver.obj, method_name)(session, data, result)
except Exception:
LOG.exception(
_("Extension driver '%(name)s' failed in %(method)s"),
{'name': driver.name, 'method': method_name}
)
def process_create_policy_target(self, session, data, result):
"""Call all extension drivers during PT creation."""
self._call_on_ext_drivers("process_create_policy_target",
session, data, result)
def process_update_policy_target(self, session, data, result):
"""Call all extension drivers during PT update."""
self._call_on_ext_drivers("process_update_policy_target",
session, data, result)
def extend_policy_target_dict(self, session, result):
"""Call all extension drivers to extend PT dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_target_dict(session, result)
def process_create_policy_target_group(self, session, data, result):
"""Call all extension drivers during PTG creation."""
self._call_on_ext_drivers("process_create_policy_target_group",
session, data, result)
def process_update_policy_target_group(self, session, data, result):
"""Call all extension drivers during PTG update."""
self._call_on_ext_drivers("process_update_policy_target_group",
session, data, result)
def extend_policy_target_group_dict(self, session, result):
"""Call all extension drivers to extend PTG dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_target_group_dict(session, result)
def process_create_l2_policy(self, session, data, result):
"""Call all extension drivers during L2P creation."""
self._call_on_ext_drivers("process_create_l2_policy",
session, data, result)
def process_update_l2_policy(self, session, data, result):
"""Call all extension drivers during L2P update."""
self._call_on_ext_drivers("process_update_l2_policy",
session, data, result)
def extend_l2_policy_dict(self, session, result):
"""Call all extension drivers to extend L2P dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_l2_policy_dict(session, result)
def process_create_l3_policy(self, session, data, result):
"""Call all extension drivers during L3P creation."""
self._call_on_ext_drivers("process_create_l3_policy",
session, data, result)
def process_update_l3_policy(self, session, data, result):
"""Call all extension drivers during L3P update."""
self._call_on_ext_drivers("process_update_l3_policy",
session, data, result)
def extend_l3_policy_dict(self, session, result):
"""Call all extension drivers to extend L3P dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_l3_policy_dict(session, result)
def process_create_policy_classifier(self, session, data, result):
"""Call all extension drivers during PC creation."""
self._call_on_ext_drivers("process_create_policy_classifier",
session, data, result)
def process_update_policy_classifier(self, session, data, result):
"""Call all extension drivers during PC update."""
self._call_on_ext_drivers("process_update_policy_classifier",
session, data, result)
def extend_policy_classifier_dict(self, session, result):
"""Call all extension drivers to extend PC dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_classifier_dict(session, result)
def process_create_policy_action(self, session, data, result):
"""Call all extension drivers during PA creation."""
self._call_on_ext_drivers("process_create_policy_action",
session, data, result)
def process_update_policy_action(self, session, data, result):
"""Call all extension drivers during PA update."""
self._call_on_ext_drivers("process_update_policy_action",
session, data, result)
def extend_policy_action_dict(self, session, result):
"""Call all extension drivers to extend PA dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_action_dict(session, result)
def process_create_policy_rule(self, session, data, result):
"""Call all extension drivers during PR creation."""
self._call_on_ext_drivers("process_create_policy_rule",
session, data, result)
def process_update_policy_rule(self, session, data, result):
"""Call all extension drivers during PR update."""
self._call_on_ext_drivers("process_update_policy_rule",
session, data, result)
def extend_policy_rule_dict(self, session, result):
"""Call all extension drivers to extend PR dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_rule_dict(session, result)
def process_create_policy_rule_set(self, session, data, result):
"""Call all extension drivers during PRS creation."""
self._call_on_ext_drivers("process_create_policy_rule_set",
session, data, result)
def process_update_policy_rule_set(self, session, data, result):
"""Call all extension drivers during PRS update."""
self._call_on_ext_drivers("process_update_policy_rule_set",
session, data, result)
def extend_policy_rule_set_dict(self, session, result):
"""Call all extension drivers to extend PRS dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_rule_set_dict(session, result)
def process_create_network_service_policy(self, session, data, result):
"""Call all extension drivers during NSP creation."""
self._call_on_ext_drivers("process_create_network_service_policy",
session, data, result)
def process_update_network_service_policy(self, session, data, result):
"""Call all extension drivers during NSP update."""
self._call_on_ext_drivers("process_update_network_service_policy",
session, data, result)
def extend_network_service_policy_dict(self, session, result):
"""Call all extension drivers to extend NSP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_network_service_policy_dict(session, result)
def process_create_external_segment(self, session, data, result):
"""Call all extension drivers during EP creation."""
self._call_on_ext_drivers("process_create_external_segment",
session, data, result)
def process_update_external_segment(self, session, data, result):
"""Call all extension drivers during EP update."""
self._call_on_ext_drivers("process_update_external_segment",
session, data, result)
def extend_external_segment_dict(self, session, result):
"""Call all extension drivers to extend EP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_external_segment_dict(session, result)
def process_create_external_policy(self, session, data, result):
"""Call all extension drivers during EP creation."""
self._call_on_ext_drivers("process_create_external_policy",
session, data, result)
def process_update_external_policy(self, session, data, result):
"""Call all extension drivers during EP update."""
self._call_on_ext_drivers("process_update_external_policy",
session, data, result)
def extend_external_policy_dict(self, session, result):
"""Call all extension drivers to extend EP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_external_policy_dict(session, result)
def process_create_nat_pool(self, session, data, result):
"""Call all extension drivers during NP creation."""
self._call_on_ext_drivers("process_create_nat_pool",
session, data, result)
def process_update_nat_pool(self, session, data, result):
"""Call all extension drivers during NP update."""
self._call_on_ext_drivers("process_update_nat_pool",
session, data, result)
def extend_nat_pool_dict(self, session, result):
"""Call all extension drivers to extend NP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_nat_pool_dict(session, result) | apache-2.0 | 5,398,071,857,213,786,000 | 45.559055 | 78 | 0.622664 | false | 4.394277 | false | false | false |
dtroyer/python-openstacksdk | openstack/object_store/v1/account.py | 1 | 1764 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.object_store.v1 import _base
from openstack import resource
class Account(_base.BaseResource):
_custom_metadata_prefix = "X-Account-Meta-"
base_path = "/"
allow_get = True
allow_update = True
allow_head = True
#: The total number of bytes that are stored in Object Storage for
#: the account.
account_bytes_used = resource.Header("x-account-bytes-used", type=int)
#: The number of containers.
account_container_count = resource.Header("x-account-container-count",
type=int)
#: The number of objects in the account.
account_object_count = resource.Header("x-account-object-count", type=int)
#: The secret key value for temporary URLs. If not set,
#: this header is not returned by this operation.
meta_temp_url_key = resource.Header("x-account-meta-temp-url-key")
#: A second secret key value for temporary URLs. If not set,
#: this header is not returned by this operation.
meta_temp_url_key_2 = resource.Header("x-account-meta-temp-url-key-2")
#: The timestamp of the transaction.
timestamp = resource.Header("x-timestamp")
has_body = False
requires_id = False
| apache-2.0 | -5,670,804,540,234,342,000 | 38.2 | 78 | 0.696145 | false | 4.009091 | false | false | false |
titu1994/MobileNetworks | weights/remove_extra_class.py | 1 | 1279 | import h5py
'''
Place all the weight files here (they should be placed here automatically after running
weight_load.py for all the checkpoints), then simply run this script to change the weights
to support 1000 classes instead of 1001.
'''
base = "mobilenet_"
alphas = ["1_0", "7_5", "5_0", "2_5"]
sizes = [224, 192, 160, 128]
end_str = "_tf.h5"
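# Expected filenames follow base + alpha + "_" + size + end_str, e.g. "mobilenet_1_0_224_tf.h5".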
for alpha in alphas:
for size in sizes:
fn = base + alpha + "_" + str(size) + end_str
print("Working on file : %s" % fn)
f = h5py.File(fn)
classification_layer = f.attrs['layer_names'][-3]
classification_dataset = f[classification_layer]
weights_name = b'conv_preds/kernel:0'
bias_name = b'conv_preds/bias:0'
weights = classification_dataset[weights_name][:]
bias = classification_dataset[bias_name][:]
# remove the first class
weights = weights[..., 1:]
bias = bias[1:]
del classification_dataset[weights_name]
classification_dataset.create_dataset(weights_name, data=weights)
del classification_dataset[bias_name]
classification_dataset.create_dataset(bias_name, data=bias)
f.close()
print("Finished processing weight file : %s" % (fn))
print("Finished processing all weights")
| apache-2.0 | 1,143,303,184,041,994,100 | 29.452381 | 95 | 0.637217 | false | 3.675287 | false | false | false |
ThomasColliers/whatmigrate | siteconnection.py | 1 | 5571 | # Class that handles What.CD authentication, can download torrents and can search the site log
import os,pycurl,urllib,re,sys,urllib2
from BeautifulSoup import BeautifulSoup
re_main = re.compile(r'<span style="color: red;">(.*?)</span>')
re_detail = re.compile(r' Torrent <a href="torrents\.php\?torrentid=\d+"> \d+</a> \((.*?)\) uploaded by <a href="user\.php\?id=\d+">.*?</a> was deleted by <a href="user\.php\?id=\d+">.*?</a> for the reason: (.*?)$')
re_replacement = re.compile(r'(.*?) \( <a href="torrents\.php\?torrentid=(\d+)">torrents\.php\?torrentid=\d+</a> \)')
class Receiver:
def __init__(self):
self.contents = ""
self.header = ""
def body_callback(self, buffer):
self.contents = self.contents + buffer
def header_callback(self,buffer):
self.header = self.header + buffer
class Connection:
def __init__(self,user,passw,use_ssl):
self.username = user
self.password = passw
self.logintries = 0
if(use_ssl): self.basepath = "https://ssl.what.cd/"
else: self.basepath = "http://what.cd/"
# Set up curl
self.rec = Receiver()
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.FOLLOWLOCATION,1)
self.curl.setopt(pycurl.MAXREDIRS,5)
self.curl.setopt(pycurl.NOSIGNAL,1)
cookiefile = os.path.expanduser("~/.whatmigrate_cookiefile")
self.curl.setopt(pycurl.COOKIEFILE,cookiefile)
self.curl.setopt(pycurl.COOKIEJAR,cookiefile)
self.curl.setopt(pycurl.WRITEFUNCTION,self.rec.body_callback)
self.curl.setopt(pycurl.HEADERFUNCTION,self.rec.header_callback)
# to reset curl after each request
def clearCurl(self):
self.rec.contents = ""
self.rec.header = ""
self.curl.setopt(pycurl.POST,0)
self.curl.setopt(pycurl.POSTFIELDS,"")
# make request
def makeRequest(self,url,post = None):
# make request
self.clearCurl()
self.curl.setopt(pycurl.URL,url)
if(post):
self.curl.setopt(pycurl.POST,1)
self.curl.setopt(pycurl.POSTFIELDS,post)
self.curl.perform()
# check if logged in
        if self.rec.contents.find('id="loginform"') != -1:
self.logintries += 1
if(self.logintries > 1): sys.exit("Site login failed, check your username and password in your configuration file")
self.login()
return self.makeRequest(url,post)
# return result
return self.rec.contents
# login
def login(self):
self.makeRequest(self.basepath+"login.php",
urllib.urlencode([
("username",self.username),
("password",self.password),
("keeplogged",1),
("login","Log in !")
])
)
# strip html
def stripHTML(self,html):
return ''.join(BeautifulSoup(html).findAll(text=True))
# search torrents
def searchTorrents(self,searchstring):
html = self.makeRequest(self.basepath+"torrents.php?searchstr="+urllib.quote(searchstring))
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
table = soup.find("table", {"id":"torrent_table"})
if not table: return False
groups = table.findAll("tr")
results = {}
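        # results: {group title: {edition: [{'format': ..., 'size': ..., 'id': ...}, ...]}}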
for group in groups:
classes = group["class"].split(' ')
# parse the groups
if "group" in classes:
copy = unicode(group.findAll('td')[2])
copy = copy[0:copy.find('<span style="float:right;">')]
currentgroup = self.stripHTML(copy).strip()
results[currentgroup] = {}
# parse the edition
elif "edition" in classes:
currentedition = group.td.strong.find(text=True,recursive=False).strip()
if currentgroup: results[currentgroup][currentedition] = []
# parse the torrent
elif "group_torrent" in classes:
torrentdata = {}
torrentdata['format'] = group.td.find('a',recursive=False).text.strip()
torrentdata['size'] = group.findAll('td')[3].text.strip()
dlink = unicode(group.td.a)
regex = re.compile(r'id=(\d+)')
reresult = regex.search(dlink)
if reresult:
torrentdata['id'] = int(reresult.group(1));
else:
continue
if currentedition and currentgroup:
results[currentgroup][currentedition].append(torrentdata)
return results
# download a torrent file
def getTorrentFile(self,torrentid):
result = self.makeRequest(self.basepath+"torrents.php?torrentid=%s" % (torrentid,))
# process result
re_torrentlink = re.compile(r'torrents\.php\?action=download&id='+str(torrentid)+r'\&authkey=.+?&torrent_pass=\w+')
result = re_torrentlink.search(result)
if not result: sys.exit("Could not find torrent with id %s." % (torrentid,))
torrentlink = result.group().replace("&","&")
torrentdata = self.makeRequest(self.basepath+torrentlink)
# parse header to get filename
torrent_filename = torrentid
for line in iter(self.rec.header.splitlines()):
if 'filename=' in line:
torrent_filename = line[line.find('filename=')+10:-1]
return (torrent_filename, torrentdata)
def close(self):
self.curl.close()
| gpl-3.0 | 8,229,056,412,642,731,000 | 41.853846 | 215 | 0.588584 | false | 3.794959 | false | false | false |
stackforge/monasca-notification | monasca_notification/plugins/email_notifier.py | 1 | 11064 | # (C) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 Fujitsu LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email.header
import email.mime.text
import email.utils
import six
import smtplib
import time
from debtcollector import removals
from oslo_config import cfg
from monasca_notification.plugins import abstract_notifier
CONF = cfg.CONF
EMAIL_SINGLE_HOST_BASE = u'''On host "{hostname}" for target "{target_host}" {message}
Alarm "{alarm_name}" transitioned to the {state} state at {timestamp} UTC
alarm_id: {alarm_id}
Lifecycle state: {lifecycle_state}
Link: {link}
Link to Grafana: {grafana_url}
With dimensions:
{metric_dimensions}'''
EMAIL_MULTIPLE_HOST_BASE = u'''On host "{hostname}" {message}
Alarm "{alarm_name}" transitioned to the {state} state at {timestamp} UTC
alarm_id: {alarm_id}
Lifecycle state: {lifecycle_state}
Link: {link}
Link to Grafana: {grafana_url}
With dimensions:
{metric_dimensions}'''
EMAIL_NO_HOST_BASE = u'''On multiple hosts {message}
Alarm "{alarm_name}" transitioned to the {state} state at {timestamp} UTC
Alarm_id: {alarm_id}
Lifecycle state: {lifecycle_state}
Link: {link}
Link to Grafana: {grafana_url}
With dimensions
{metric_dimensions}'''
class EmailNotifier(abstract_notifier.AbstractNotifier):
type = 'email'
def __init__(self, log):
super(EmailNotifier, self).__init__()
self._log = log
self._smtp = None
@removals.remove(
message='Configuration of notifier is available through oslo.cfg',
version='1.9.0',
removal_version='3.0.0'
)
def config(self, config=None):
self._smtp_connect()
@property
def statsd_name(self):
return "sent_smtp_count"
def send_notification(self, notification):
"""Send the notification via email
Returns the True upon success, False upon failure
"""
# Get the "hostname" from the notification metrics if there is one
hostname = []
targethost = []
for metric in notification.metrics:
dimap = metric['dimensions']
if 'hostname' in dimap and not dimap['hostname'] in hostname:
hostname.append(dimap['hostname'])
if 'target_host' in dimap and not dimap['target_host'] in targethost:
targethost.append(dimap['target_host'])
# Generate the message
msg = self._create_msg(hostname, notification, targethost)
if not self._smtp and not self._smtp_connect():
return False
try:
self._sendmail(notification, msg)
return True
except smtplib.SMTPServerDisconnected:
self._log.warn('SMTP server disconnected. '
'Will reconnect and retry message.')
self._smtp_connect()
except smtplib.SMTPException:
self._email_error(notification)
return False
try:
self._sendmail(notification, msg)
return True
except smtplib.SMTPException:
self._email_error(notification)
return False
def _sendmail(self, notification, msg):
self._smtp.sendmail(CONF.email_notifier.from_addr,
notification.address,
msg.as_string())
self._log.debug("Sent email to {}, notification {}".format(notification.address,
notification.to_json()))
def _email_error(self, notification):
self._log.exception("Error sending Email Notification")
self._log.error("Failed email: {}".format(notification.to_json()))
def _smtp_connect(self):
"""Connect to the smtp server
"""
self._log.info("Connecting to Email Server {}".format(
CONF.email_notifier.server))
try:
smtp = smtplib.SMTP(CONF.email_notifier.server,
CONF.email_notifier.port,
timeout=CONF.email_notifier.timeout)
email_notifier_user = CONF.email_notifier.user
email_notifier_password = CONF.email_notifier.password
if email_notifier_user and email_notifier_password:
smtp.login(email_notifier_user,
email_notifier_password)
self._smtp = smtp
return True
except Exception:
self._log.exception("Unable to connect to email server.")
return False
def _create_msg(self, hostname, notification, targethost=None):
"""Create two kind of messages:
1. Notifications that include metrics with a hostname as a dimension.
There may be more than one hostname.
We will only report the hostname if there is only one.
2. Notifications that do not include metrics and therefore no hostname.
Example: API initiated changes.
* A third notification type which include metrics but do not include a hostname will
be treated as type #2.
"""
timestamp = time.asctime(time.gmtime(notification.alarm_timestamp))
alarm_seconds = notification.alarm_timestamp
alarm_ms = int(round(alarm_seconds * 1000))
graf_url = self._get_link_url(notification.metrics[0], alarm_ms)
dimensions = _format_dimensions(notification)
if len(hostname) == 1: # Type 1
if targethost:
text = EMAIL_SINGLE_HOST_BASE.format(
hostname=hostname[0],
target_host=targethost[0],
message=notification.message.lower(),
alarm_name=notification.alarm_name,
state=notification.state,
timestamp=timestamp,
alarm_id=notification.alarm_id,
metric_dimensions=dimensions,
link=notification.link,
grafana_url=graf_url,
lifecycle_state=notification.lifecycle_state
)
subject = u'{} {} "{}" for Host: {} Target: {}'.format(
notification.state, notification.severity,
notification.alarm_name, hostname[0],
targethost[0]
)
else:
text = EMAIL_MULTIPLE_HOST_BASE.format(
hostname=hostname[0],
message=notification.message.lower(),
alarm_name=notification.alarm_name,
state=notification.state,
timestamp=timestamp,
alarm_id=notification.alarm_id,
metric_dimensions=dimensions,
link=notification.link,
grafana_url=graf_url,
lifecycle_state=notification.lifecycle_state
)
subject = u'{} {} "{}" for Host: {}'.format(
notification.state, notification.severity,
notification.alarm_name, hostname[0])
else: # Type 2
text = EMAIL_NO_HOST_BASE.format(
message=notification.message.lower(),
alarm_name=notification.alarm_name,
state=notification.state,
timestamp=timestamp,
alarm_id=notification.alarm_id,
metric_dimensions=dimensions,
link=notification.link,
grafana_url=graf_url,
lifecycle_state=notification.lifecycle_state
)
subject = u'{} {} "{}" '.format(notification.state,
notification.severity,
notification.alarm_name)
msg = email.mime.text.MIMEText(text, 'plain', 'utf-8')
msg['Subject'] = email.header.Header(subject, 'utf-8')
msg['From'] = CONF.email_notifier.from_addr
msg['To'] = notification.address
msg['Date'] = email.utils.formatdate(localtime=True, usegmt=True)
return msg
def _get_link_url(self, metric, timestamp_ms):
"""Returns the url to Grafana including a query with the
respective metric info (name, dimensions, timestamp)
:param metric: the metric for which to display the graph in Grafana
:param timestamp_ms: timestamp of the alarm for the metric in milliseconds
:return: the url to the graph for the given metric or None if no Grafana host
has been defined.
"""
grafana_url = CONF.email_notifier.grafana_url
if grafana_url is None:
return None
url = ''
metric_query = ''
metric_query = "?metric=%s" % metric['name']
dimensions = metric['dimensions']
for key, value in six.iteritems(dimensions):
metric_query += "&dim_%s=%s" % (key, value)
# Show the graph within a range of ten minutes before and after the alarm occurred.
offset = 600000
from_ms = timestamp_ms - offset
to_ms = timestamp_ms + offset
time_query = "&from=%s&to=%s" % (from_ms, to_ms)
url = grafana_url + '/dashboard/script/drilldown.js'
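        # Illustrative result (hypothetical metric/dimension values):
        #   <grafana_url>/dashboard/script/drilldown.js?metric=cpu.idle_perc&dim_hostname=devstack&from=1480000000000&to=1480001200000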
return url + metric_query + time_query
def _format_dimensions(notification):
dimension_sets = []
for metric in notification.metrics:
dimension_sets.append(metric['dimensions'])
dim_set_strings = []
for dimension_set in dimension_sets:
key_value_pairs = []
for key, value in dimension_set.items():
key_value_pairs.append(u' {}: {}'.format(key, value))
set_string = u' {\n' + u',\n'.join(key_value_pairs) + u'\n }'
dim_set_strings.append(set_string)
dimensions = u'[\n' + u',\n'.join(dim_set_strings) + u' \n]'
return dimensions
email_notifier_group = cfg.OptGroup(name='%s_notifier' % EmailNotifier.type)
email_notifier_opts = [
cfg.StrOpt(name='from_addr'),
cfg.HostAddressOpt(name='server'),
cfg.PortOpt(name='port', default=25),
cfg.IntOpt(name='timeout', default=5, min=1),
cfg.StrOpt(name='user', default=None),
cfg.StrOpt(name='password', default=None, secret=True),
cfg.StrOpt(name='grafana_url', default=None)
]
def register_opts(conf):
conf.register_group(email_notifier_group)
conf.register_opts(email_notifier_opts, group=email_notifier_group)
def list_opts():
return {
email_notifier_group: email_notifier_opts
}
| apache-2.0 | 4,724,552,376,887,592,000 | 34.235669 | 95 | 0.596439 | false | 4.266872 | false | false | false |
MadsJensen/malthe_alpha_project | source_connectivity_permutation.py | 1 | 6505 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:41:17 2015.
@author: mje
"""
import numpy as np
import numpy.random as npr
import os
import socket
import mne
# import pandas as pd
from mne.connectivity import spectral_connectivity
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
# Permutation test.
def permutation_resampling(case, control, num_samples, statistic):
"""
Permutation test.
Return p-value that statistic for case is different
from statistc for control.
"""
observed_diff = abs(statistic(case) - statistic(control))
num_case = len(case)
combined = np.concatenate([case, control])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_case]) - np.mean(xs[num_case:])
diffs.append(diff)
pval = (np.sum(diffs > observed_diff) +
np.sum(diffs < -observed_diff))/float(num_samples)
return pval, observed_diff, diffs
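# Example usage (illustrative values), e.g. comparing two groups of observations:
#   case = np.array([1.83, 1.92, 2.01, 1.77, 2.10])
#   control = np.array([1.55, 1.61, 1.70, 1.58, 1.65])
#   pval, obs_diff, diffs = permutation_resampling(case, control, 10000, np.mean)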
def permutation_test(a, b, num_samples, statistic):
"""
Permutation test.
Return p-value that statistic for a is different
from statistc for b.
"""
observed_diff = abs(statistic(b) - statistic(a))
num_a = len(a)
combined = np.concatenate([a, b])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_a]) - np.mean(xs[num_a:])
diffs.append(diff)
pval = np.sum(np.abs(diffs) >= np.abs(observed_diff)) / float(num_samples)
return pval, observed_diff, diffs
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
n_jobs = 1
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
n_jobs = 1
subjects_dir = data_path + "fs_subjects_dir/"
# change dir to save files the rigth place
os.chdir(data_path)
fname_inv = data_path + '0001-meg-oct-6-inv.fif'
fname_epochs = data_path + '0001_p_03_filter_ds_ica-mc_tsss-epo.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
# Parameters
snr = 1.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
epochs = mne.read_epochs(fname_epochs)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
#labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Lobes',
labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Brodmann',
regexp="Brodmann",
subjects_dir=subjects_dir)
labels_occ = labels[6:12]
# labels = mne.read_labels_from_annot('subject_1', parc='aparc.DKTatlas40',
# subjects_dir=subjects_dir)
for cond in epochs.event_id.keys():
stcs = apply_inverse_epochs(epochs[cond], inverse_operator, lambda2,
method, pick_ori="normal")
exec("stcs_%s = stcs" % cond)
labels_name = [label.name for label in labels_occ]
for label in labels_occ:
labels_name += [label.name]
# Extract time series
ts_ctl_left = mne.extract_label_time_course(stcs_ctl_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
ts_ent_left = mne.extract_label_time_course(stcs_ent_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
stcs_all_left = stcs_ctl_left + stcs_ent_left
ts_all_left = np.asarray(mne.extract_label_time_course(stcs_all_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip"))
number_of_permutations = 2000
index = np.arange(0, len(ts_all_left))
permutations_results = np.empty(number_of_permutations)
fmin, fmax = 7, 12
tmin, tmax = 0, 1
con_method = "plv"
diff_permuatation = np.empty([6, 6, number_of_permutations])
# diff
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
ts_ctl_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
con_ent, freqs_ent, times_ent, n_epochs_ent, n_tapers_ent =\
spectral_connectivity(
ts_ent_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
diff = con_ctl[:, :, 0] - con_ent[:, :, 0]
for i in range(number_of_permutations):
index = np.random.permutation(index)
tmp_ctl = ts_all_left[index[:64], :, :]
tmp_case = ts_all_left[index[64:], :, :]
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
tmp_ctl,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
con_case, freqs_case, times_case, n_epochs_case, n_tapers_case =\
spectral_connectivity(
tmp_case,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
diff_permuatation[:, :, i] = con_ctl[:, :, 0] - con_case[:, :, 0]
pval = np.empty_like(diff)
for h in range(diff.shape[0]):
for j in range(diff.shape[1]):
if diff[h, j] != 0:
            pval[h, j] = np.sum(np.abs(diff_permuatation[h, j, :]) >=
                                np.abs(diff[h, j])) / float(number_of_permutations)
# np.sum(np.abs(diff[h, j]) >= np.abs(
# diff_permuatation[h, j, :]))\
# / float(number_of_permutations)
| mit | 1,237,449,237,555,734,500 | 29.539906 | 79 | 0.563105 | false | 3.303707 | false | false | false |
nnmware/nnmware | apps/money/admin.py | 1 | 2443 | # nnmware(c)2012-2020
from __future__ import unicode_literals
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from nnmware.apps.money.models import Transaction, Bill, Currency, ExchangeRate
@admin.register(Transaction)
class TransactionAdmin(admin.ModelAdmin):
list_display = ('user', 'date', 'actor', 'status', 'amount', 'currency', 'content_object')
search_fields = ('name', )
list_filter = ('user', 'date')
ordering = ('user', )
# readonly_fields = ('actor_ctype','actor_oid','target_ctype','target_oid')
fieldsets = (
(_("Transaction"), {"fields": [("user", "date"),
('amount', 'currency', 'status'),
('actor_ctype', 'actor_oid'),
('content_type', 'object_id')]}),
)
_readonly_fields = [] # Default fields that are readonly for everyone.
def get_readonly_fields(self, request, obj=None):
readonly = list(self._readonly_fields)
if request.user.is_staff and not request.user.is_superuser:
readonly.extend(['user', 'date', 'actor_ctype', 'actor_oid', 'content_type', 'object_id', 'amount',
'currency', 'status'])
return readonly
@admin.register(Bill)
class BillAdmin(admin.ModelAdmin):
list_display = ('user', 'invoice_number', 'date_billed', 'content_object', 'status', 'amount', 'currency')
search_fields = ('name',)
list_filter = ('user', 'date_billed')
ordering = ('user', )
# readonly_fields = ('target_ctype','target_oid')
fieldsets = (
(_("Bill"), {"fields": [("user", "date_billed"),
('amount', 'currency'),
('content_type', 'object_id'),
('invoice_number', 'description_small'),
('description',),
('status', 'date')]}),
)
@admin.register(Currency)
class CurrencyAdmin(admin.ModelAdmin):
list_display = ('code',)
search_fields = ('name',)
@admin.register(ExchangeRate)
class ExchangeRateAdmin(admin.ModelAdmin):
list_display = ('currency', 'date', 'nominal', 'official_rate', 'rate')
search_fields = ('currency',)
fieldsets = (
(_("Exchange Rate"), {"fields": [("currency", "date"), ('nominal', 'official_rate', 'rate')]}),
)
| gpl-3.0 | 3,056,534,765,082,308,000 | 37.171875 | 111 | 0.552599 | false | 4.044702 | false | false | false |
t3dev/odoo | addons/website_slides_survey/tests/test_course_certification_failure.py | 1 | 6421 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests.common import SurveyCase
class TestCourseCertificationFailureFlow(SurveyCase):
def test_course_certification_failure_flow(self):
# Step 1: create a simple certification
# --------------------------------------------------
with self.sudo(self.survey_user):
certification = self.env['survey.survey'].create({
'title': 'Small course certification',
'access_mode': 'public',
'users_login_required': True,
'scoring_type': 'scoring_with_answers',
'certificate': True,
'is_attempts_limited': True,
'passing_score': 100.0,
'attempts_limit': 2,
'stage_id': self.env['survey.stage'].search([('closed', '=', False)]).id
})
self._add_question(
None, 'Question 1', 'simple_choice',
sequence=1,
survey_id=certification.id,
labels=[
{'value': 'Wrong answer'},
{'value': 'Correct answer', 'is_correct': True, 'answer_score': 1.0}
])
self._add_question(
None, 'Question 2', 'simple_choice',
sequence=2,
survey_id=certification.id,
labels=[
{'value': 'Wrong answer'},
{'value': 'Correct answer', 'is_correct': True, 'answer_score': 1.0}
])
# Step 1.1: create a simple channel
self.channel = self.env['slide.channel'].sudo().create({
'name': 'Test Channel',
'channel_type': 'training',
'enroll': 'public',
'visibility': 'public',
'website_published': True,
})
# Step 2: link the certification to a slide of type 'certification'
self.slide_certification = self.env['slide.slide'].sudo().create({
'name': 'Certification slide',
'channel_id': self.channel.id,
'slide_type': 'certification',
'survey_id': certification.id,
'website_published': True,
})
# Step 3: add public user as member of the channel
self.channel._action_add_members(self.user_public.partner_id)
# forces recompute of partner_ids as we create directly in relation
self.channel.invalidate_cache()
slide_partner = self.slide_certification._action_set_viewed(self.user_public.partner_id)
self.slide_certification.sudo(self.user_public)._generate_certification_url()
self.assertEqual(1, len(slide_partner.user_input_ids), 'A user input should have been automatically created upon slide view')
# Step 4: fill in the created user_input with wrong answers
self.fill_in_answer(slide_partner.user_input_ids[0], certification.question_ids)
self.assertFalse(slide_partner.survey_quizz_passed, 'Quizz should not be marked as passed with wrong answers')
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should still be a member of the course because he still has attempts left')
# Step 5: simulate a 'retry'
retry_user_input = self.slide_certification.survey_id.sudo()._create_answer(
partner=self.user_public.partner_id,
**{
'slide_id': self.slide_certification.id,
'slide_partner_id': slide_partner.id
},
invite_token=slide_partner.user_input_ids[0].invite_token
)
# Step 6: fill in the new user_input with wrong answers again
self.fill_in_answer(retry_user_input, certification.question_ids)
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertNotIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should have been kicked out of the course because he failed his last attempt')
# Step 7: add public user as member of the channel once again
self.channel._action_add_members(self.user_public.partner_id)
# forces recompute of partner_ids as we create directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should be a member of the course once again')
new_slide_partner = self.slide_certification._action_set_viewed(self.user_public.partner_id)
self.slide_certification.sudo(self.user_public)._generate_certification_url()
self.assertEqual(1, len(new_slide_partner.user_input_ids.filtered(lambda user_input: user_input.state != 'done')), 'A new user input should have been automatically created upon slide view')
# Step 8: fill in the created user_input with correct answers this time
self.fill_in_answer(new_slide_partner.user_input_ids.filtered(lambda user_input: user_input.state != 'done')[0], certification.question_ids, good_answers=True)
self.assertTrue(new_slide_partner.survey_quizz_passed, 'Quizz should be marked as passed with correct answers')
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should still be a member of the course')
def fill_in_answer(self, answer, questions, good_answers=False):
""" Fills in the user_input with answers for all given questions.
You can control whether the answer will be correct or not with the 'good_answers' param.
(It's assumed that wrong answers are at index 0 of question.labels_ids and good answers at index 1) """
answer.write({
'state': 'done',
'user_input_line_ids': [
(0, 0, {
'question_id': question.id,
'answer_type': 'suggestion',
'answer_score': 1 if good_answers else 0,
'value_suggested': question.labels_ids[1 if good_answers else 0].id
}) for question in questions
]
})
| gpl-3.0 | 2,418,318,624,319,238,000 | 51.631148 | 197 | 0.611587 | false | 4.196732 | false | false | false |
dikaiosune/nau-elc-force-completion-mailer | mailer.py | 1 | 7174 | __author__ = 'adam'
import configparser
import logging
import argparse
import smtplib
from email.mime.text import MIMEText
from datetime import datetime, timedelta
from xlrd import open_workbook
from jinja2 import Template
template = Template("""
<p>Dear {{ instructor.first }},</p>
<p>One of the most common Blackboard Learn problems encountered by students and instructors is a student's inability to
resume taking a quiz or test after a temporary network glitch. This is greatly exacerbated by the use of the "Force
Completion" test option. We strongly recommend that you never use this option in your Bb quiz or test unless you
have a very specific pedagogical reason to do so. If you are not familiar with the option, a more detailed
explanation is available at <a href="https://bblearn.nau.edu/bbcswebdav/xid-28427315_1" target="_blank">this page</a>.
If you are familiar with this option and would like to keep it in place regardless, please ignore the rest of this
message.
</p>
<p>We have run a report to find tests and quizzes in your {{ term }} courses that have the Force Completion option
selected. We <i>strongly</i> encourage you to disable this option and to use <b>Auto-Submit</b> instead. To turn off
Force Completion for these items, simply find the item in your course (we have done our best to identify where that
is), select <b>Edit the Test Options</b> from its drop-down menu, and under the <b>Test Availability</b> section,
deselect/uncheck <b>Force Completion</b>, then click <b>Submit</b>. </p>
<p>{{ term }} tests with Force Completion enabled as of {{ day_before_report }}:</p>
<ul>
{% for course, tests in instructor.courses.items() %}
<li> {{ course }}
<ul>
{% for test in tests %} <li> {{ test }} </li>
{% endfor %}
</ul>
<br/>
</li>
{% endfor %}
</ul>
<p>Please contact the e-Learning Center if you would like to discuss this setting. In short, we recommend that you never
use the Force Completion option.</p>
<p>
<a href="http://nau.edu/elc">e-Learning Center</a><br>
<a href="mailto:[email protected]">[email protected]</a><br>
In Flagstaff: +1 (928) 523-5554<br>
Elsewhere: +1 (866) 802-5256<br>
</p>
""")
def create_root_logger(log_file):
parent_logger = logging.getLogger('nau_force_completion')
parent_logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)s^%(levelname)s: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
parent_logger.addHandler(fh)
parent_logger.addHandler(ch)
def parse_cli_arguments():
argparser = argparse.ArgumentParser(description='Force Completion email tool for ELC @ NAU.')
argparser.add_argument('--config', required=True, help='Path to ini file.', metavar='FILE')
argparser.add_argument('--report', required=True, metavar='FILE',
help='Path to the force completion report file.')
argparser.add_argument('--dry-run', action='store_true',
help='Add this flag to send all emails to the default address specified in the ini file.')
return vars(argparser.parse_args())
# parse CLI args for:
args = parse_cli_arguments()
# dry-run?
dry_run = args['dry_run']
# report file
report_file = args['report']
config_file = args['config']
# parse some report metadata from the filename
# example report filename: 'force-completion-1154-2015-08-04_120920'
filename = report_file[report_file.index('force-completion-'):]
termcode = filename[17:21]
term = {'1': 'Spring', '4': 'Summer', '7': 'Fall', '8': 'Winter'}[termcode[3]] + ' 20' + termcode[1:3]
day_before_report = datetime.strptime(filename[22:32], '%Y-%m-%d') - timedelta(days=1)
day_before_report = day_before_report.strftime('%A %B %d, %Y')
# read configuration
config = configparser.ConfigParser()
config.read(config_file)
config = config['FORCE_COMPLETE']
# setup root logger
logfile = config.get('logfile', 'force-completion-mailer.log')
create_root_logger(logfile)
log = logging.getLogger('nau_force_completion.mailer')
log.debug("Parameters: %s", args)
log.debug("Config: %s", {k: config[k] for k in config})
# get default email
default_email = config.get('default_email')
# get server info
smtp_server = config['smtp_server']
smtp_port = config['smtp_port']
sender = smtplib.SMTP(host=smtp_server, port=smtp_port)
# parse report into instructors, courses and tests
report = open_workbook(filename=report_file).sheet_by_index(0)
header_keys = [report.cell(0, idx).value for idx in range(report.ncols)]
rows_as_dict_list = []
for row_index in range(1, report.nrows):
d = {header_keys[col_index]: report.cell(row_index, col_index).value
for col_index in range(report.ncols)}
rows_as_dict_list.append(d)
instructors = {}
num_instructors = 0
num_courses = 0
num_tests = 0
while len(rows_as_dict_list) > 0:
row = rows_as_dict_list.pop()
uid = row['PI UID']
first_name = row['PI First Name']
last_name = row['PI Last Name']
email = row['PI Email']
course_id = row['Course ID']
course_name = row['Course Name']
test_name = row['Test Name']
test_path = row['Path to Test']
if uid not in instructors:
instructors[uid] = {'first': first_name, 'last': last_name, 'email': email, 'courses': {}}
num_instructors += 1
if course_name not in instructors[uid]['courses']:
instructors[uid]['courses'][course_name] = []
num_courses += 1
instructors[uid]['courses'][course_name].append(test_path + ' > ' + test_name)
num_tests += 1
# sort each course's test list so tests are listed in a stable order in the email
for i in instructors:
for c in instructors[i]['courses']:
instructors[i]['courses'][c] = sorted(instructors[i]['courses'][c])
# print stats on report (num instructors, num courses, num tests)
log.info('Report successfully parsed.')
log.info('%s instructors found in report.', num_instructors)
log.info('%s courses found in report.', num_courses)
log.info('%s tests found in report.', num_tests)
log.info('Sending %s emails...', num_instructors)
# render templates and send emails
emails_sent = 0
for uid in instructors:
instructor = instructors.get(uid)
current_email = template.render(instructor=instructor, term=term, day_before_report=day_before_report)
msg = MIMEText(current_email, 'html')
msg['Subject'] = 'Bb Learn Force Completion Notification'
msg['From'] = 'e-Learning Center <[email protected]>'
# if it's a dry run, send to the test email, rather than each individual user
to_addr = default_email if dry_run else instructor.get('email')
instructor_name = instructor['first'] + ' ' + instructor['last']
msg['To'] = instructor_name + ' <' + to_addr + '>'
sender.sendmail(from_addr='[email protected]', to_addrs=to_addr, msg=msg.as_string())
emails_sent += 1
log.info('Sent email to %s (%s), %s/%s sent.', instructor_name, to_addr, emails_sent, num_instructors)
sender.quit()
| mit | -1,298,246,076,243,391,500 | 35.050251 | 122 | 0.681907 | false | 3.387158 | true | false | false |
jackrzhang/zulip | zerver/views/invite.py | 1 | 5106 |
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import List, Optional, Set
from zerver.decorator import require_realm_admin, to_non_negative_int, \
require_non_guest_human_user
from zerver.lib.actions import do_invite_users, do_revoke_user_invite, do_resend_user_invite_email, \
get_default_subs, do_get_user_invites, do_create_multiuse_invite_link
from zerver.lib.request import REQ, has_request_variables, JsonableError
from zerver.lib.response import json_success, json_error, json_response
from zerver.lib.streams import access_stream_by_name, access_stream_by_id
from zerver.lib.validator import check_string, check_list, check_bool, check_int
from zerver.models import PreregistrationUser, Stream, UserProfile
import re
@require_non_guest_human_user
@has_request_variables
def invite_users_backend(request: HttpRequest, user_profile: UserProfile,
invitee_emails_raw: str=REQ("invitee_emails"),
invite_as_admin: Optional[bool]=REQ(validator=check_bool, default=False),
) -> HttpResponse:
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
return json_error(_("Must be an organization administrator"))
if invite_as_admin and not user_profile.is_realm_admin:
return json_error(_("Must be an organization administrator"))
if not invitee_emails_raw:
return json_error(_("You must specify at least one email address."))
invitee_emails = get_invitee_emails_set(invitee_emails_raw)
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error(_("You must specify at least one stream for invitees to join."))
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream # type: Optional[Stream]
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = [] # type: List[Stream]
for stream_name in stream_names:
try:
(stream, recipient, sub) = access_stream_by_name(user_profile, stream_name)
except JsonableError:
return json_error(_("Stream does not exist: %s. No invites were sent.") % (stream_name,))
streams.append(stream)
do_invite_users(user_profile, invitee_emails, streams, invite_as_admin)
return json_success()
def get_invitee_emails_set(invitee_emails_raw: str) -> Set[str]:
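    """Parse a comma/newline separated invitee list into a set of bare email
    addresses; entries may be plain addresses or "Name <address>" forms,
    e.g. "Alice <[email protected]>, [email protected]"."""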
invitee_emails_list = set(re.split(r'[,\n]', invitee_emails_raw))
invitee_emails = set()
for email in invitee_emails_list:
is_email_with_name = re.search(r'<(?P<email>.*)>', email)
if is_email_with_name:
email = is_email_with_name.group('email')
invitee_emails.add(email.strip())
return invitee_emails
@require_realm_admin
def get_user_invites(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
all_users = do_get_user_invites(user_profile)
return json_success({'invites': all_users})
@require_realm_admin
@has_request_variables
def revoke_user_invite(request: HttpRequest, user_profile: UserProfile,
prereg_id: int) -> HttpResponse:
try:
prereg_user = PreregistrationUser.objects.get(id=prereg_id)
except PreregistrationUser.DoesNotExist:
raise JsonableError(_("No such invitation"))
if prereg_user.referred_by.realm != user_profile.realm:
raise JsonableError(_("No such invitation"))
do_revoke_user_invite(prereg_user)
return json_success()
@require_realm_admin
@has_request_variables
def resend_user_invite_email(request: HttpRequest, user_profile: UserProfile,
prereg_id: int) -> HttpResponse:
try:
prereg_user = PreregistrationUser.objects.get(id=prereg_id)
except PreregistrationUser.DoesNotExist:
raise JsonableError(_("No such invitation"))
if (prereg_user.referred_by.realm != user_profile.realm):
raise JsonableError(_("No such invitation"))
timestamp = do_resend_user_invite_email(prereg_user)
return json_success({'timestamp': timestamp})
@require_realm_admin
@has_request_variables
def generate_multiuse_invite_backend(request: HttpRequest, user_profile: UserProfile,
stream_ids: List[int]=REQ(validator=check_list(check_int),
default=[])) -> HttpResponse:
streams = []
for stream_id in stream_ids:
try:
(stream, recipient, sub) = access_stream_by_id(user_profile, stream_id)
except JsonableError:
return json_error(_("Invalid stream id {}. No invites were sent.".format(stream_id)))
streams.append(stream)
invite_link = do_create_multiuse_invite_link(user_profile, streams)
return json_success({'invite_link': invite_link})
| apache-2.0 | -138,191,675,718,502,620 | 42.641026 | 101 | 0.686643 | false | 3.816143 | false | false | false |
sanguinariojoe/FreeCAD | src/Mod/Fem/femtaskpanels/task_solver_ccxtools.py | 9 | 16191 | # ***************************************************************************
# * Copyright (c) 2015 Bernd Hahnebach <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM solver calculix ccx tools task panel for the document object"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package task_solver_ccxtools
# \ingroup FEM
# \brief task panel for solver ccx tools object
import os
import sys
import time
from PySide import QtCore
from PySide import QtGui
from PySide.QtCore import Qt
from PySide.QtGui import QApplication
import FreeCAD
import FreeCADGui
import FemGui
if sys.version_info.major >= 3:
def unicode(text, *args):
return str(text)
class _TaskPanel:
"""
The TaskPanel for CalculiX ccx tools solver object
"""
def __init__(self, solver_object):
self.form = FreeCADGui.PySideUic.loadUi(
FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/SolverCalculix.ui"
)
from femtools.ccxtools import CcxTools as ccx
# we do not need to pass the analysis, it will be found on fea init
# TODO: if there is not analysis object in document init of fea
# will fail with an exception and task panel will not open
# handle more smart by a pop up error message and still open
# task panel, may be deactivate write and run button.
self.fea = ccx(solver_object)
self.fea.setup_working_dir()
self.fea.setup_ccx()
self.Calculix = QtCore.QProcess()
self.Timer = QtCore.QTimer()
self.Timer.start(300)
self.fem_console_message = ""
# Connect Signals and Slots
QtCore.QObject.connect(
self.form.tb_choose_working_dir,
QtCore.SIGNAL("clicked()"),
self.choose_working_dir
)
QtCore.QObject.connect(
self.form.pb_write_inp,
QtCore.SIGNAL("clicked()"),
self.write_input_file_handler
)
QtCore.QObject.connect(
self.form.pb_edit_inp,
QtCore.SIGNAL("clicked()"),
self.editCalculixInputFile
)
QtCore.QObject.connect(
self.form.pb_run_ccx,
QtCore.SIGNAL("clicked()"),
self.runCalculix
)
QtCore.QObject.connect(
self.form.rb_static_analysis,
QtCore.SIGNAL("clicked()"),
self.select_static_analysis
)
QtCore.QObject.connect(
self.form.rb_frequency_analysis,
QtCore.SIGNAL("clicked()"),
self.select_frequency_analysis
)
QtCore.QObject.connect(
self.form.rb_thermomech_analysis,
QtCore.SIGNAL("clicked()"),
self.select_thermomech_analysis
)
QtCore.QObject.connect(
self.form.rb_check_mesh,
QtCore.SIGNAL("clicked()"),
self.select_check_mesh
)
QtCore.QObject.connect(
self.form.rb_buckling_analysis,
QtCore.SIGNAL("clicked()"),
self.select_buckling_analysis
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("started()"),
self.calculixStarted
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("stateChanged(QProcess::ProcessState)"),
self.calculixStateChanged
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("error(QProcess::ProcessError)"),
self.calculixError
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("finished(int)"),
self.calculixFinished
)
QtCore.QObject.connect(
self.Timer,
QtCore.SIGNAL("timeout()"),
self.UpdateText
)
self.update()
def getStandardButtons(self):
# only show a close button
# def accept() in no longer needed, since there is no OK button
return int(QtGui.QDialogButtonBox.Close)
def reject(self):
FreeCADGui.ActiveDocument.resetEdit()
def update(self):
"fills the widgets"
self.form.le_working_dir.setText(self.fea.working_dir)
if self.fea.solver.AnalysisType == "static":
self.form.rb_static_analysis.setChecked(True)
elif self.fea.solver.AnalysisType == "frequency":
self.form.rb_frequency_analysis.setChecked(True)
elif self.fea.solver.AnalysisType == "thermomech":
self.form.rb_thermomech_analysis.setChecked(True)
elif self.fea.solver.AnalysisType == "check":
self.form.rb_check_mesh.setChecked(True)
elif self.fea.solver.AnalysisType == "buckling":
self.form.rb_buckling_analysis.setChecked(True)
return
def femConsoleMessage(self, message="", color="#000000"):
if sys.version_info.major < 3:
message = message.encode("utf-8", "replace")
self.fem_console_message = self.fem_console_message + (
'<font color="#0000FF">{0:4.1f}:</font> <font color="{1}">{2}</font><br>'
.format(time.time() - self.Start, color, message)
)
self.form.textEdit_Output.setText(self.fem_console_message)
self.form.textEdit_Output.moveCursor(QtGui.QTextCursor.End)
def printCalculiXstdout(self):
out = self.Calculix.readAllStandardOutput()
# print(type(out))
# <class 'PySide2.QtCore.QByteArray'>
if out.isEmpty():
self.femConsoleMessage("CalculiX stdout is empty", "#FF0000")
return False
if sys.version_info.major >= 3:
# https://forum.freecadweb.org/viewtopic.php?f=18&t=39195
# convert QByteArray to a binary string an decode it to "utf-8"
out = out.data().decode() # "utf-8" can be omitted
# print(type(out))
# print(out)
else:
try:
out = unicode(out, "utf-8", "replace")
rx = QtCore.QRegExp("\\*ERROR.*\\n\\n")
# print(rx)
rx.setMinimal(True)
pos = rx.indexIn(out)
while not pos < 0:
match = rx.cap(0)
FreeCAD.Console.PrintError(match.strip().replace("\n", " ") + "\n")
pos = rx.indexIn(out, pos + 1)
except UnicodeDecodeError:
self.femConsoleMessage("Error converting stdout from CalculiX", "#FF0000")
out = os.linesep.join([s for s in out.splitlines() if s])
out = out.replace("\n", "<br>")
# print(out)
self.femConsoleMessage(out)
if "*ERROR in e_c3d: nonpositive jacobian" in out:
error_message = (
"\n\nCalculiX returned an error due to "
"nonpositive jacobian determinant in at least one element\n"
"Use the run button on selected solver to get a better error output.\n"
)
FreeCAD.Console.PrintError(error_message)
if "*ERROR" in out:
return False
else:
return True
def UpdateText(self):
if(self.Calculix.state() == QtCore.QProcess.ProcessState.Running):
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
def calculixError(self, error=""):
print("Error() {}".format(error))
self.femConsoleMessage("CalculiX execute error: {}".format(error), "#FF0000")
def calculixNoError(self):
print("CalculiX done without error!")
self.femConsoleMessage("CalculiX done without error!", "#00AA00")
def calculixStarted(self):
# print("calculixStarted()")
FreeCAD.Console.PrintLog("calculix state: {}\n".format(self.Calculix.state()))
self.form.pb_run_ccx.setText("Break CalculiX")
def calculixStateChanged(self, newState):
if (newState == QtCore.QProcess.ProcessState.Starting):
self.femConsoleMessage("Starting CalculiX...")
if (newState == QtCore.QProcess.ProcessState.Running):
self.femConsoleMessage("CalculiX is running...")
if (newState == QtCore.QProcess.ProcessState.NotRunning):
self.femConsoleMessage("CalculiX stopped.")
def calculixFinished(self, exitCode):
# print("calculixFinished(), exit code: {}".format(exitCode))
FreeCAD.Console.PrintLog("calculix state: {}\n".format(self.Calculix.state()))
# Restore previous cwd
QtCore.QDir.setCurrent(self.cwd)
self.Timer.stop()
if self.printCalculiXstdout():
self.calculixNoError()
else:
self.calculixError()
self.form.pb_run_ccx.setText("Re-run CalculiX")
self.femConsoleMessage("Loading result sets...")
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
self.fea.reset_mesh_purge_results_checked()
self.fea.inp_file_name = self.fea.inp_file_name
# check if ccx is greater than 2.10, if not do not read results
# https://forum.freecadweb.org/viewtopic.php?f=18&t=23548#p183829 Point 3
# https://forum.freecadweb.org/viewtopic.php?f=18&t=23548&start=20#p183909
# https://forum.freecadweb.org/viewtopic.php?f=18&t=23548&start=30#p185027
# https://github.com/FreeCAD/FreeCAD/commit/3dd1c9f
majorVersion, minorVersion = self.fea.get_ccx_version()
if majorVersion == 2 and minorVersion <= 10:
message = (
"The used CalculiX version {}.{} creates broken output files. "
"The result file will not be read by FreeCAD FEM. "
"You still can try to read it stand alone with FreeCAD, but it is "
"strongly recommended to upgrade CalculiX to a newer version.\n"
.format(majorVersion, minorVersion)
)
QtGui.QMessageBox.warning(None, "Upgrade CalculiX", message)
            # do not try to read the broken result file
            return
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
self.fea.load_results()
except Exception:
FreeCAD.Console.PrintError("loading results failed\n")
QApplication.restoreOverrideCursor()
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
def choose_working_dir(self):
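        """Ask the user for the CalculiX working directory and apply it to the solver setup."""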
        wd = QtGui.QFileDialog.getExistingDirectory(
            None,
            "Choose CalculiX working directory",
            self.fea.working_dir
        )
if os.path.isdir(wd):
self.fea.setup_working_dir(wd)
self.form.le_working_dir.setText(self.fea.working_dir)
def write_input_file_handler(self):
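        """Check the analysis prerequisites and write the CalculiX input (.inp) file."""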
self.Start = time.time()
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
QApplication.restoreOverrideCursor()
if self.check_prerequisites_helper():
QApplication.setOverrideCursor(Qt.WaitCursor)
self.fea.write_inp_file()
if self.fea.inp_file_name != "":
self.femConsoleMessage("Write completed.")
self.form.pb_edit_inp.setEnabled(True)
self.form.pb_run_ccx.setEnabled(True)
else:
self.femConsoleMessage("Write .inp file failed!", "#FF0000")
QApplication.restoreOverrideCursor()
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
def check_prerequisites_helper(self):
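        """Update the analysis objects and verify the prerequisites.

        Shows a message box and returns False if something is missing.
        """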
self.Start = time.time()
self.femConsoleMessage("Check dependencies...")
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
self.fea.update_objects()
message = self.fea.check_prerequisites()
if message != "":
QtGui.QMessageBox.critical(None, "Missing prerequisite(s)", message)
return False
return True
def start_ext_editor(self, ext_editor_path, filename):
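        """Start the external editor on the given file if it is not already running."""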
if not hasattr(self, "ext_editor_process"):
self.ext_editor_process = QtCore.QProcess()
if self.ext_editor_process.state() != QtCore.QProcess.Running:
self.ext_editor_process.start(ext_editor_path, [filename])
def editCalculixInputFile(self):
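        """Open the written .inp file in the internal FEM editor or the configured external editor."""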
print("editCalculixInputFile {}".format(self.fea.inp_file_name))
ccx_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem/Ccx")
if ccx_prefs.GetBool("UseInternalEditor", True):
FemGui.open(self.fea.inp_file_name)
else:
ext_editor_path = ccx_prefs.GetString("ExternalEditorPath", "")
if ext_editor_path:
self.start_ext_editor(ext_editor_path, self.fea.inp_file_name)
else:
print(
"External editor is not defined in FEM preferences. "
"Falling back to internal editor"
)
FemGui.open(self.fea.inp_file_name)
def runCalculix(self):
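        """Start the CalculiX binary on the written input file, using the file's directory as cwd."""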
# print("runCalculix")
self.Start = time.time()
self.femConsoleMessage("CalculiX binary: {}".format(self.fea.ccx_binary))
self.femConsoleMessage("CalculiX input file: {}".format(self.fea.inp_file_name))
self.femConsoleMessage("Run CalculiX...")
FreeCAD.Console.PrintMessage(
"run CalculiX at: {} with: {}\n"
.format(self.fea.ccx_binary, self.fea.inp_file_name)
)
# change cwd because ccx may crash if directory has no write permission
# there is also a limit of the length of file names so jump to the document directory
self.cwd = QtCore.QDir.currentPath()
fi = QtCore.QFileInfo(self.fea.inp_file_name)
QtCore.QDir.setCurrent(fi.path())
self.Calculix.start(self.fea.ccx_binary, ["-i", fi.baseName()])
QApplication.restoreOverrideCursor()
def select_analysis_type(self, analysis_type):
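        """Set the solver analysis type; disabling the buttons forces a new input file write."""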
if self.fea.solver.AnalysisType != analysis_type:
self.fea.solver.AnalysisType = analysis_type
self.form.pb_edit_inp.setEnabled(False)
self.form.pb_run_ccx.setEnabled(False)
def select_static_analysis(self):
self.select_analysis_type("static")
def select_frequency_analysis(self):
self.select_analysis_type("frequency")
def select_thermomech_analysis(self):
self.select_analysis_type("thermomech")
def select_check_mesh(self):
self.select_analysis_type("check")
def select_buckling_analysis(self):
self.select_analysis_type("buckling")
| lgpl-2.1 | -6,757,533,628,294,308,000 | 39.680905 | 94 | 0.577852 | false | 3.952881 | false | false | false |