repo_name (stringlengths 5..100) | path (stringlengths 4..299) | copies (stringclasses, 990 values) | size (stringlengths 4..7) | content (stringlengths 666..1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000..9,223,297,778B) | line_mean (float64, 3.17..100) | line_max (int64, 7..1k) | alpha_frac (float64, 0.25..0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
ACJTeam/enigma2 | lib/python/Components/AVSwitch.py | 1 | 10590 | from config import config, ConfigSlider, ConfigSelection, ConfigYesNo, ConfigEnableDisable, ConfigSubsection, ConfigBoolean, ConfigSelectionNumber, ConfigNothing, NoSave
from enigma import eAVSwitch, eDVBVolumecontrol, getDesktop
from SystemInfo import SystemInfo
import os
class AVSwitch:
def setInput(self, input):
INPUT = { "ENCODER": 0, "SCART": 1, "AUX": 2 }
eAVSwitch.getInstance().setInput(INPUT[input])
def setColorFormat(self, value):
eAVSwitch.getInstance().setColorFormat(value)
def setAspectRatio(self, value):
eAVSwitch.getInstance().setAspectRatio(value)
def setSystem(self, value):
eAVSwitch.getInstance().setVideomode(value)
def getOutputAspect(self):
valstr = config.av.aspectratio.value
if valstr in ("4_3_letterbox", "4_3_panscan"): # 4:3
return (4,3)
elif valstr == "16_9": # auto ... 4:3 or 16:9
try:
if "1" in open("/proc/stb/vmpeg/0/aspect", "r").read(): # 4:3
return (4,3)
except IOError:
pass
elif valstr in ("16_9_always", "16_9_letterbox"): # 16:9
pass
elif valstr in ("16_10_letterbox", "16_10_panscan"): # 16:10
return (16,10)
return (16,9)
def getFramebufferScale(self):
aspect = self.getOutputAspect()
fb_size = getDesktop(0).size()
return (aspect[0] * fb_size.height(), aspect[1] * fb_size.width())
def getAspectRatioSetting(self):
valstr = config.av.aspectratio.value
if valstr == "4_3_letterbox":
val = 0
elif valstr == "4_3_panscan":
val = 1
elif valstr == "16_9":
val = 2
elif valstr == "16_9_always":
val = 3
elif valstr == "16_10_letterbox":
val = 4
elif valstr == "16_10_panscan":
val = 5
elif valstr == "16_9_letterbox":
val = 6
return val
def setAspectWSS(self, aspect=None):
if not config.av.wss.value:
value = 2 # auto(4:3_off)
else:
value = 1 # auto
eAVSwitch.getInstance().setWSS(value)
def InitAVSwitch():
config.av = ConfigSubsection()
config.av.yuvenabled = ConfigBoolean(default=True)
colorformat_choices = {"cvbs": _("CVBS"), "rgb": _("RGB"), "svideo": _("S-Video")}
# when YUV is not enabled, don't let the user select it
if config.av.yuvenabled.value:
colorformat_choices["yuv"] = _("YPbPr")
config.av.colorformat = ConfigSelection(choices=colorformat_choices, default="rgb")
config.av.aspectratio = ConfigSelection(choices={
"4_3_letterbox": _("4:3 Letterbox"),
"4_3_panscan": _("4:3 PanScan"),
"16_9": _("16:9"),
"16_9_always": _("16:9 always"),
"16_10_letterbox": _("16:10 Letterbox"),
"16_10_panscan": _("16:10 PanScan"),
"16_9_letterbox": _("16:9 Letterbox")},
default = "16_9")
config.av.aspect = ConfigSelection(choices={
"4_3": _("4:3"),
"16_9": _("16:9"),
"16_10": _("16:10"),
"auto": _("Automatic")},
default = "auto")
policy2_choices = {
# TRANSLATORS: (aspect ratio policy: black bars on top/bottom) in doubt, keep english term.
"letterbox": _("Letterbox"),
# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
"panscan": _("Pan&scan"),
# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
"scale": _("Just scale")}
try:
if "auto" in open("/proc/stb/video/policy2_choices").read():
# TRANSLATORS: (aspect ratio policy: always try to display as fullscreen, when there is no content (black bars) on left/right, even if this breaks the aspect.
policy2_choices.update({"auto": _("Auto")})
except:
pass
config.av.policy_169 = ConfigSelection(choices=policy2_choices, default = "letterbox")
policy_choices = {
# TRANSLATORS: (aspect ratio policy: black bars on left/right) in doubt, keep english term.
"pillarbox": _("Pillarbox"),
# TRANSLATORS: (aspect ratio policy: cropped content on left/right) in doubt, keep english term
"panscan": _("Pan&scan"),
# TRANSLATORS: (aspect ratio policy: display as fullscreen, with stretching the left/right)
"nonlinear": _("Nonlinear"),
# TRANSLATORS: (aspect ratio policy: display as fullscreen, even if this breaks the aspect)
"scale": _("Just scale")}
try:
if "auto" in open("/proc/stb/video/policy_choices").read():
# TRANSLATORS: (aspect ratio policy: always try to display as fullscreen, when there is no content (black bars) on left/right, even if this breaks the aspect.
policy_choices.update({"auto": _("Auto")})
except:
pass
config.av.policy_43 = ConfigSelection(choices=policy_choices, default = "pillarbox")
config.av.tvsystem = ConfigSelection(choices = {"pal": _("PAL"), "ntsc": _("NTSC"), "multinorm": _("multinorm")}, default="pal")
config.av.wss = ConfigEnableDisable(default = True)
config.av.generalAC3delay = ConfigSelectionNumber(-1000, 1000, 5, default = 0)
config.av.generalPCMdelay = ConfigSelectionNumber(-1000, 1000, 5, default = 0)
config.av.vcrswitch = ConfigEnableDisable(default = False)
iAVSwitch = AVSwitch()
def setColorFormat(configElement):
map = {"cvbs": 0, "rgb": 1, "svideo": 2, "yuv": 3}
iAVSwitch.setColorFormat(map[configElement.value])
def setAspectRatio(configElement):
map = {"4_3_letterbox": 0, "4_3_panscan": 1, "16_9": 2, "16_9_always": 3, "16_10_letterbox": 4, "16_10_panscan": 5, "16_9_letterbox" : 6}
iAVSwitch.setAspectRatio(map[configElement.value])
def setSystem(configElement):
map = {"pal": 0, "ntsc": 1, "multinorm" : 2}
iAVSwitch.setSystem(map[configElement.value])
def setWSS(configElement):
iAVSwitch.setAspectWSS()
# this will call the "setup-val" initial
config.av.colorformat.addNotifier(setColorFormat)
config.av.aspectratio.addNotifier(setAspectRatio)
config.av.tvsystem.addNotifier(setSystem)
config.av.wss.addNotifier(setWSS)
iAVSwitch.setInput("ENCODER") # init on startup
SystemInfo["ScartSwitch"] = eAVSwitch.getInstance().haveScartSwitch()
try:
SystemInfo["CanDownmixAC3"] = "downmix" in open("/proc/stb/audio/ac3_choices", "r").read()
except:
SystemInfo["CanDownmixAC3"] = False
if SystemInfo["CanDownmixAC3"]:
def setAC3Downmix(configElement):
open("/proc/stb/audio/ac3", "w").write(configElement.value and "downmix" or "passthrough")
config.av.downmix_ac3 = ConfigYesNo(default = True)
config.av.downmix_ac3.addNotifier(setAC3Downmix)
try:
SystemInfo["CanDownmixDTS"] = "downmix" in open("/proc/stb/audio/dts_choices", "r").read()
except:
SystemInfo["CanDownmixDTS"] = False
if SystemInfo["CanDownmixDTS"]:
def setDTSDownmix(configElement):
open("/proc/stb/audio/dts", "w").write(configElement.value and "downmix" or "passthrough")
config.av.downmix_dts = ConfigYesNo(default = True)
config.av.downmix_dts.addNotifier(setDTSDownmix)
try:
SystemInfo["CanDownmixAAC"] = "downmix" in open("/proc/stb/audio/aac_choices", "r").read()
except:
SystemInfo["CanDownmixAAC"] = False
if SystemInfo["CanDownmixAAC"]:
def setAACDownmix(configElement):
open("/proc/stb/audio/aac", "w").write(configElement.value and "downmix" or "passthrough")
config.av.downmix_aac = ConfigYesNo(default = True)
config.av.downmix_aac.addNotifier(setAACDownmix)
try:
SystemInfo["CanChangeOsdAlpha"] = open("/proc/stb/video/alpha", "r") and True or False
except:
SystemInfo["CanChangeOsdAlpha"] = False
if SystemInfo["CanChangeOsdAlpha"]:
def setAlpha(config):
open("/proc/stb/video/alpha", "w").write(str(config.value))
config.av.osd_alpha = ConfigSlider(default=255, limits=(0,255))
config.av.osd_alpha.addNotifier(setAlpha)
if os.path.exists("/proc/stb/vmpeg/0/pep_scaler_sharpness"):
def setScaler_sharpness(config):
myval = int(config.value)
try:
print "--> setting scaler_sharpness to: %0.8X" % myval
open("/proc/stb/vmpeg/0/pep_scaler_sharpness", "w").write("%0.8X" % myval)
open("/proc/stb/vmpeg/0/pep_apply", "w").write("1")
except IOError:
print "couldn't write pep_scaler_sharpness"
config.av.scaler_sharpness = ConfigSlider(default=13, limits=(0,26))
config.av.scaler_sharpness.addNotifier(setScaler_sharpness)
else:
config.av.scaler_sharpness = NoSave(ConfigNothing())
if SystemInfo["HasMultichannelPCM"]:
def setMultichannelPCM(configElement):
open(SystemInfo["HasMultichannelPCM"], "w").write(configElement.value and "enable" or "disable")
config.av.multichannel_pcm = ConfigYesNo(default = False)
config.av.multichannel_pcm.addNotifier(setMultichannelPCM)
if SystemInfo["HasAutoVolume"]:
def setAutoVolume(configElement):
open(SystemInfo["HasAutoVolume"], "w").write(configElement.value)
config.av.autovolume = ConfigSelection(default = "none", choices = [("none", _("off")), ("hdmi", _("HDMI")), ("spdif", _("SPDIF")), ("dac", _("DAC"))])
config.av.autovolume.addNotifier(setAutoVolume)
if SystemInfo["HasAutoVolumeLevel"]:
def setAutoVolumeLevel(configElement):
open(SystemInfo["HasAutoVolumeLevel"], "w").write(configElement.value and "enabled" or "disabled")
config.av.autovolumelevel = ConfigYesNo(default = False)
config.av.autovolumelevel.addNotifier(setAutoVolumeLevel)
if SystemInfo["Has3DSurround"]:
def set3DSurround(configElement):
open(SystemInfo["Has3DSurround"], "w").write(configElement.value)
config.av.surround_3d = ConfigSelection(default = "none", choices = [("none", _("off")), ("hdmi", _("HDMI")), ("spdif", _("SPDIF")), ("dac", _("DAC"))])
config.av.surround_3d.addNotifier(set3DSurround)
if SystemInfo["Has3DSpeaker"]:
def set3DSpeaker(configElement):
open(SystemInfo["Has3DSpeaker"], "w").write(configElement.value)
config.av.speaker_3d = ConfigSelection(default = "center", choices = [("center", _("center")), ("wide", _("wide")), ("extrawide", _("extra wide"))])
config.av.speaker_3d.addNotifier(set3DSpeaker)
if SystemInfo["Has3DSurroundSpeaker"]:
def set3DSurroundSpeaker(configElement):
open(SystemInfo["Has3DSurroundSpeaker"], "w").write(configElement.value)
config.av.surround_3d_speaker = ConfigSelection(default = "disabled", choices = [("disabled", _("off")), ("center", _("center")), ("wide", _("wide")), ("extrawide", _("extra wide"))])
config.av.surround_3d_speaker.addNotifier(set3DSurroundSpeaker)
if SystemInfo["Has3DSurroundSoftLimiter"]:
def set3DSurroundSoftLimiter(configElement):
open(SystemInfo["Has3DSurroundSoftLimiter"], "w").write(configElement.value and "enabled" or "disabled")
config.av.surround_softlimiter_3d = ConfigYesNo(default = False)
config.av.surround_softlimiter_3d.addNotifier(set3DSurroundSoftLimiter)
def setVolumeStepsize(configElement):
eDVBVolumecontrol.getInstance().setVolumeSteps(int(configElement.value))
config.av.volume_stepsize = ConfigSelectionNumber(1, 10, 1, default = 5)
config.av.volume_stepsize.addNotifier(setVolumeStepsize)
| gpl-2.0 | -5,518,978,411,526,034,000 | 40.206226 | 185 | 0.706232 | false |
darkonie/dcos | dcos_installer/prettyprint.py | 4 | 3850 | import json
import logging
import pprint
import re
log = logging.getLogger(__name__)
def print_header(string):
delimiter = '====>'
log.warning('{:5s} {:6s}'.format(delimiter, string))
class PrettyPrint():
"""
Pretty prints the output from the deployment process.
"""
def __init__(self, output):
self.output = output
self.fail_hosts = []
self.success_hosts = []
self.preflight = False
def beautify(self, mode='print_data_basic'):
self.failed_data, self.success_data = self.find_data(self.output)
getattr(self, mode)()
return self.failed_data, self.success_data
def find_data(self, data):
failed_data = []
success_data = []
for hosts in data:
for host in hosts:
for ip, results in host.items():
if results['returncode'] == 0:
if ip not in self.success_hosts:
self.success_hosts.append(ip)
success_data.append(host)
else:
if ip not in self.fail_hosts:
self.fail_hosts.append(ip)
failed_data.append(host)
# Remove failed from success hosts
self.success_hosts = [ip for ip in self.success_hosts if ip not in self.fail_hosts]
return failed_data, success_data
def _print_host_set(self, status, hosts):
if len(hosts) > 0:
for host in hosts:
for ip, data in host.items():
log = logging.getLogger(str(ip))
log.error('====> {} {}'.format(ip, status))
log.debug(' CODE:\n{}'.format(data['returncode']))
log.error(' TASK:\n{}'.format(' '.join(data['cmd'])))
log.error(' STDERR:')
self.color_preflight(host=ip, rc=data['returncode'], data_array=data['stderr'])
log.error(' STDOUT:')
self.color_preflight(host=ip, rc=data['returncode'], data_array=data['stdout'])
log.info('')
def print_data(self):
print_header('OUTPUT FOR {}'.format(self.stage_name))
self._print_host_set("FAILED", self.failed_data)
self._print_host_set("PASSED", self.success_data)
def print_summary(self):
print_header('SUMMARY FOR {}'.format(self.stage_name))
total = len(self.fail_hosts) + len(self.success_hosts)
err_msg = '{} out of {} hosts successfully completed {} stage.'
log.warning(err_msg.format(len(self.success_hosts), total, self.stage_name))
if len(self.fail_hosts) > 0:
log.error('The following hosts had failures detected during {} stage:'.format(self.stage_name))
for host in self.fail_hosts:
log.error(' {} failures detected.'.format(host))
print_header('END OF SUMMARY FOR {}'.format(self.stage_name))
def color_preflight(self, host='NULL', rc=0, data_array=[]):
"""
A subroutine to parse the output from the dcos_install.sh script's pass or fail
output.
"""
log = logging.getLogger(host)
does_pass = re.compile('PASS')
does_fail = re.compile('FAIL')
for line in data_array:
if line is not None and line != '':
if does_pass.search(line):
log.debug(' {}'.format(line))
elif does_fail.search(line):
log.error(' {}'.format(line))
elif rc != 0:
log.error(' {}'.format(line))
else:
log.debug(' {}'.format(line))
def print_json(self):
pprint.pprint(json.dumps(self.output))
| apache-2.0 | -3,003,576,329,997,714,000 | 36.378641 | 107 | 0.524156 | false |
jaharkes/home-assistant | homeassistant/components/notify/ios.py | 10 | 3658 | """
iOS push notification platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/ecosystem/ios/notifications/
"""
import logging
from datetime import datetime, timezone
import requests
from homeassistant.components import ios
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_MESSAGE,
ATTR_DATA, BaseNotificationService)
_LOGGER = logging.getLogger(__name__)
PUSH_URL = "https://ios-push.home-assistant.io/push"
DEPENDENCIES = ["ios"]
# pylint: disable=invalid-name
def log_rate_limits(target, resp, level=20):
"""Output rate limit log line at given level."""
rate_limits = resp["rateLimits"]
resetsAt = dt_util.parse_datetime(rate_limits["resetsAt"])
resetsAtTime = resetsAt - datetime.now(timezone.utc)
rate_limit_msg = ("iOS push notification rate limits for %s: "
"%d sent, %d allowed, %d errors, "
"resets in %s")
_LOGGER.log(level, rate_limit_msg,
ios.device_name_for_push_id(target),
rate_limits["successful"],
rate_limits["maximum"], rate_limits["errors"],
str(resetsAtTime).split(".")[0])
def get_service(hass, config):
"""Get the iOS notification service."""
if "notify.ios" not in hass.config.components:
# Need this to enable requirements checking in the app.
hass.config.components.append("notify.ios")
if not ios.devices_with_push():
_LOGGER.error(("The notify.ios platform was loaded but no "
"devices exist! Please check the documentation at "
"https://home-assistant.io/ecosystem/ios/notifications"
"/ for more information"))
return None
return iOSNotificationService()
class iOSNotificationService(BaseNotificationService):
"""Implement the notification service for iOS."""
def __init__(self):
"""Initialize the service."""
@property
def targets(self):
"""Return a dictionary of registered targets."""
return ios.devices_with_push()
def send_message(self, message="", **kwargs):
"""Send a message to the Lambda APNS gateway."""
data = {ATTR_MESSAGE: message}
if kwargs.get(ATTR_TITLE) is not None:
# Remove default title from notifications.
if kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT:
data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = ios.enabled_push_ids()
if kwargs.get(ATTR_DATA) is not None:
data[ATTR_DATA] = kwargs.get(ATTR_DATA)
for target in targets:
data[ATTR_TARGET] = target
req = requests.post(PUSH_URL, json=data, timeout=10)
if req.status_code != 201:
fallback_error = req.json().get("errorMessage",
"Unknown error")
fallback_message = ("Internal server error, "
"please try again later: "
"{}").format(fallback_error)
message = req.json().get("message", fallback_message)
if req.status_code == 429:
_LOGGER.warning(message)
log_rate_limits(target, req.json(), 30)
else:
_LOGGER.error(message)
else:
log_rate_limits(target, req.json())
| mit | -4,160,635,206,843,412,500 | 34.173077 | 78 | 0.591853 | false |
skosukhin/spack | lib/spack/spack/provider_index.py | 1 | 10997 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
The ``virtual`` module contains utility classes for virtual dependencies.
"""
from itertools import product as iproduct
from six import iteritems
from pprint import pformat
import spack.util.spack_yaml as syaml
from yaml.error import MarkedYAMLError
import spack
import spack.error
class ProviderIndex(object):
"""This is a dict of dicts used for finding providers of particular
virtual dependencies. The dict of dicts looks like:
{ vpkg name :
{ full vpkg spec : set(packages providing spec) } }
Callers can use this to first find which packages provide a vpkg,
then find a matching full spec. e.g., in this scenario:
{ 'mpi' :
{ mpi@:1.1 : set([mpich]),
mpi@:2.3 : set([[email protected]:]) } }
Calling providers_for(spec) will find specs that provide a
matching implementation of MPI.
"""
def __init__(self, specs=None, restrict=False):
"""Create a new ProviderIndex.
Optional arguments:
specs
List (or sequence) of specs. If provided, will call
`update` on this ProviderIndex with each spec in the list.
restrict
"restricts" values to the verbatim input specs; do not
pre-apply package's constraints.
TODO: rename this. It is intended to keep things as broad
as possible without overly restricting results, so it is
not the best name.
"""
if specs is None:
specs = []
self.restrict = restrict
self.providers = {}
for spec in specs:
if not isinstance(spec, spack.spec.Spec):
spec = spack.spec.Spec(spec)
if spec.virtual:
continue
self.update(spec)
def update(self, spec):
if not isinstance(spec, spack.spec.Spec):
spec = spack.spec.Spec(spec)
if not spec.name:
# Empty specs do not have a package
return
assert(not spec.virtual)
pkg_provided = spec.package_class.provided
for provided_spec, provider_specs in iteritems(pkg_provided):
for provider_spec in provider_specs:
# TODO: fix this comment.
# We want satisfaction other than flags
provider_spec.compiler_flags = spec.compiler_flags.copy()
if spec.satisfies(provider_spec, deps=False):
provided_name = provided_spec.name
provider_map = self.providers.setdefault(provided_name, {})
if provided_spec not in provider_map:
provider_map[provided_spec] = set()
if self.restrict:
provider_set = provider_map[provided_spec]
# If this package existed in the index before,
# need to take the old versions out, as they're
# now more constrained.
old = set(
[s for s in provider_set if s.name == spec.name])
provider_set.difference_update(old)
# Now add the new version.
provider_set.add(spec)
else:
# Before putting the spec in the map, constrain
# it so that it provides what was asked for.
constrained = spec.copy()
constrained.constrain(provider_spec)
provider_map[provided_spec].add(constrained)
def providers_for(self, *vpkg_specs):
"""Gives specs of all packages that provide virtual packages
with the supplied specs."""
providers = set()
for vspec in vpkg_specs:
# Allow string names to be passed as input, as well as specs
if type(vspec) == str:
vspec = spack.spec.Spec(vspec)
# Add all the providers that satisfy the vpkg spec.
if vspec.name in self.providers:
for p_spec, spec_set in self.providers[vspec.name].items():
if p_spec.satisfies(vspec, deps=False):
providers.update(spec_set)
# Return providers in order. Defensively copy.
return sorted(s.copy() for s in providers)
# TODO: this is pretty darned nasty, and inefficient, but there
# are not that many vdeps in most specs.
def _cross_provider_maps(self, lmap, rmap):
result = {}
for lspec, rspec in iproduct(lmap, rmap):
try:
constrained = lspec.constrained(rspec)
except spack.spec.UnsatisfiableSpecError:
continue
# lp and rp are left and right provider specs.
for lp_spec, rp_spec in iproduct(lmap[lspec], rmap[rspec]):
if lp_spec.name == rp_spec.name:
try:
const = lp_spec.constrained(rp_spec, deps=False)
result.setdefault(constrained, set()).add(const)
except spack.spec.UnsatisfiableSpecError:
continue
return result
def __contains__(self, name):
"""Whether a particular vpkg name is in the index."""
return name in self.providers
def satisfies(self, other):
"""Check that providers of virtual specs are compatible."""
common = set(self.providers) & set(other.providers)
if not common:
return True
# This ensures that some provider in other COULD satisfy the
# vpkg constraints on self.
result = {}
for name in common:
crossed = self._cross_provider_maps(self.providers[name],
other.providers[name])
if crossed:
result[name] = crossed
return all(c in result for c in common)
def to_yaml(self, stream=None):
provider_list = self._transform(
lambda vpkg, pset: [
vpkg.to_node_dict(), [p.to_node_dict() for p in pset]], list)
syaml.dump({'provider_index': {'providers': provider_list}},
stream=stream)
@staticmethod
def from_yaml(stream):
try:
yfile = syaml.load(stream)
except MarkedYAMLError as e:
raise spack.spec.SpackYAMLError(
"error parsing YAML ProviderIndex cache:", str(e))
if not isinstance(yfile, dict):
raise ProviderIndexError("YAML ProviderIndex was not a dict.")
if 'provider_index' not in yfile:
raise ProviderIndexError(
"YAML ProviderIndex does not start with 'provider_index'")
index = ProviderIndex()
providers = yfile['provider_index']['providers']
index.providers = _transform(
providers,
lambda vpkg, plist: (
spack.spec.Spec.from_node_dict(vpkg),
set(spack.spec.Spec.from_node_dict(p) for p in plist)))
return index
def merge(self, other):
"""Merge `other` ProviderIndex into this one."""
other = other.copy() # defensive copy.
for pkg in other.providers:
if pkg not in self.providers:
self.providers[pkg] = other.providers[pkg]
continue
spdict, opdict = self.providers[pkg], other.providers[pkg]
for provided_spec in opdict:
if provided_spec not in spdict:
spdict[provided_spec] = opdict[provided_spec]
continue
spdict[provided_spec] = \
spdict[provided_spec].union(opdict[provided_spec])
def remove_provider(self, pkg_name):
"""Remove a provider from the ProviderIndex."""
empty_pkg_dict = []
for pkg, pkg_dict in self.providers.items():
empty_pset = []
for provided, pset in pkg_dict.items():
same_name = set(p for p in pset if p.fullname == pkg_name)
pset.difference_update(same_name)
if not pset:
empty_pset.append(provided)
for provided in empty_pset:
del pkg_dict[provided]
if not pkg_dict:
empty_pkg_dict.append(pkg)
for pkg in empty_pkg_dict:
del self.providers[pkg]
def copy(self):
"""Deep copy of this ProviderIndex."""
clone = ProviderIndex()
clone.providers = self._transform(
lambda vpkg, pset: (vpkg, set((p.copy() for p in pset))))
return clone
def __eq__(self, other):
return self.providers == other.providers
def _transform(self, transform_fun, out_mapping_type=dict):
return _transform(self.providers, transform_fun, out_mapping_type)
def __str__(self):
return pformat(
_transform(self.providers,
lambda k, v: (k, list(v))))
def _transform(providers, transform_fun, out_mapping_type=dict):
"""Syntactic sugar for transforming a providers dict.
transform_fun takes a (vpkg, pset) mapping and runs it on each
pair in nested dicts.
"""
def mapiter(mappings):
if isinstance(mappings, dict):
return iteritems(mappings)
else:
return iter(mappings)
return dict(
(name, out_mapping_type([
transform_fun(vpkg, pset) for vpkg, pset in mapiter(mappings)]))
for name, mappings in providers.items())
class ProviderIndexError(spack.error.SpackError):
"""Raised when there is a problem with a ProviderIndex."""
| lgpl-2.1 | 2,157,932,681,051,611,100 | 35.293729 | 79 | 0.573702 | false |
transcranial/gensim | gensim/corpora/wikicorpus.py | 37 | 13015 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Copyright (C) 2012 Lars Buitinck <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Construct a corpus from a Wikipedia (or other MediaWiki-based) database dump.
If you have the `pattern` package installed, this module will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
See scripts/process_wiki.py for a canned (example) script based on this
module.
"""
import bz2
import logging
import re
from xml.etree.cElementTree import iterparse # LXML isn't faster, so let's go with the built-in solution
import multiprocessing
from gensim import utils
# cannot import whole gensim.corpora, because that imports wikicorpus...
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.textcorpus import TextCorpus
logger = logging.getLogger('gensim.corpora.wikicorpus')
# ignore articles shorter than ARTICLE_MIN_WORDS characters (after full preprocessing)
ARTICLE_MIN_WORDS = 50
RE_P0 = re.compile('<!--.*?-->', re.DOTALL | re.UNICODE) # comments
RE_P1 = re.compile('<ref([> ].*?)(</ref>|/>)', re.DOTALL | re.UNICODE) # footnotes
RE_P2 = re.compile("(\n\[\[[a-z][a-z][\w-]*:[^:\]]+\]\])+$", re.UNICODE) # links to languages
RE_P3 = re.compile("{{([^}{]*)}}", re.DOTALL | re.UNICODE) # template
RE_P4 = re.compile("{{([^}]*)}}", re.DOTALL | re.UNICODE) # template
RE_P5 = re.compile('\[(\w+):\/\/(.*?)(( (.*?))|())\]', re.UNICODE) # remove URL, keep description
RE_P6 = re.compile("\[([^][]*)\|([^][]*)\]", re.DOTALL | re.UNICODE) # simplify links, keep description
RE_P7 = re.compile('\n\[\[[iI]mage(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of images
RE_P8 = re.compile('\n\[\[[fF]ile(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of files
RE_P9 = re.compile('<nowiki([> ].*?)(</nowiki>|/>)', re.DOTALL | re.UNICODE) # outside links
RE_P10 = re.compile('<math([> ].*?)(</math>|/>)', re.DOTALL | re.UNICODE) # math content
RE_P11 = re.compile('<(.*?)>', re.DOTALL | re.UNICODE) # all other tags
RE_P12 = re.compile('\n(({\|)|(\|-)|(\|}))(.*?)(?=\n)', re.UNICODE) # table formatting
RE_P13 = re.compile('\n(\||\!)(.*?\|)*([^|]*?)', re.UNICODE) # table cell formatting
RE_P14 = re.compile('\[\[Category:[^][]*\]\]', re.UNICODE) # categories
# Remove File and Image template
RE_P15 = re.compile('\[\[([fF]ile:|[iI]mage)[^]]*(\]\])', re.UNICODE)
def filter_wiki(raw):
"""
Filter out wiki mark-up from `raw`, leaving only text. `raw` is either unicode
or utf-8 encoded string.
"""
# parsing of the wiki markup is not perfect, but sufficient for our purposes
# contributions to improving this code are welcome :)
text = utils.to_unicode(raw, 'utf8', errors='ignore')
text = utils.decode_htmlentities(text) # '&nbsp;' --> '\xa0'
return remove_markup(text)
def remove_markup(text):
text = re.sub(RE_P2, "", text) # remove the last list (=languages)
# the wiki markup is recursive (markup inside markup etc)
# instead of writing a recursive grammar, here we deal with that by removing
# markup in a loop, starting with inner-most expressions and working outwards,
# for as long as something changes.
text = remove_template(text)
text = remove_file(text)
iters = 0
while True:
old, iters = text, iters + 1
text = re.sub(RE_P0, "", text) # remove comments
text = re.sub(RE_P1, '', text) # remove footnotes
text = re.sub(RE_P9, "", text) # remove outside links
text = re.sub(RE_P10, "", text) # remove math content
text = re.sub(RE_P11, "", text) # remove all remaining tags
text = re.sub(RE_P14, '', text) # remove categories
text = re.sub(RE_P5, '\\3', text) # remove urls, keep description
text = re.sub(RE_P6, '\\2', text) # simplify links, keep description only
# remove table markup
text = text.replace('||', '\n|') # each table cell on a separate line
text = re.sub(RE_P12, '\n', text) # remove formatting lines
text = re.sub(RE_P13, '\n\\3', text) # leave only cell content
# remove empty mark-up
text = text.replace('[]', '')
if old == text or iters > 2: # stop if nothing changed between two iterations or after a fixed number of iterations
break
# the following is needed to make the tokenizer see '[[socialist]]s' as a single word 'socialists'
# TODO is this really desirable?
text = text.replace('[', '').replace(']', '') # promote all remaining markup to plain text
return text
def remove_template(s):
"""Remove template wikimedia markup.
Return a copy of `s` with all the wikimedia markup template removed. See
http://meta.wikimedia.org/wiki/Help:Template for wikimedia templates
details.
Note: Since templates can be nested, it is difficult to remove them using
regular expressions.
"""
# Find the start and end position of each template by finding the opening
# '{{' and closing '}}'
n_open, n_close = 0, 0
starts, ends = [], []
in_template = False
prev_c = None
for i, c in enumerate(iter(s)):
if not in_template:
if c == '{' and c == prev_c:
starts.append(i - 1)
in_template = True
n_open = 1
if in_template:
if c == '{':
n_open += 1
elif c == '}':
n_close += 1
if n_open == n_close:
ends.append(i)
in_template = False
n_open, n_close = 0, 0
prev_c = c
# Remove all the templates
s = ''.join([s[end + 1:start] for start, end in
zip(starts + [None], [-1] + ends)])
return s
def remove_file(s):
"""Remove the 'File:' and 'Image:' markup, keeping the file caption.
Return a copy of `s` with all the 'File:' and 'Image:' markup replaced by
their corresponding captions. See http://www.mediawiki.org/wiki/Help:Images
for the markup details.
"""
# The regex RE_P15 match a File: or Image: markup
for match in re.finditer(RE_P15, s):
m = match.group(0)
caption = m[:-2].split('|')[-1]
s = s.replace(m, caption, 1)
return s
def tokenize(content):
"""
Tokenize a piece of text from wikipedia. The input string `content` is assumed
to be mark-up free (see `filter_wiki()`).
Return list of tokens as utf8 bytestrings. Ignore words shorter than 2 or longer
than 15 characters (not bytes!).
"""
# TODO maybe ignore tokens with non-latin characters? (no chinese, arabic, russian etc.)
return [token.encode('utf8') for token in utils.tokenize(content, lower=True, errors='ignore')
if 2 <= len(token) <= 15 and not token.startswith('_')]
def get_namespace(tag):
"""Returns the namespace of tag."""
m = re.match("^{(.*?)}", tag)
namespace = m.group(1) if m else ""
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("%s not recognized as MediaWiki dump namespace"
% namespace)
return namespace
_get_namespace = get_namespace
def extract_pages(f, filter_namespaces=False):
"""
Extract pages from MediaWiki database dump.
Return an iterable over (str, str, str) which generates (title, content, pageid) triplets.
"""
elems = (elem for _, elem in iterparse(f, events=("end",)))
# We can't rely on the namespace for database dumps, since it's changed
# it every time a small modification to the format is made. So, determine
# those from the first element we find, which will be part of the metadata,
# and construct element paths.
elem = next(elems)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
ns_path = "./{%(ns)s}ns" % ns_mapping
pageid_path = "./{%(ns)s}id" % ns_mapping
for elem in elems:
if elem.tag == page_tag:
title = elem.find(title_path).text
text = elem.find(text_path).text
ns = elem.find(ns_path).text
if filter_namespaces and ns not in filter_namespaces:
text = None
pageid = elem.find(pageid_path).text
yield title, text or "", pageid # empty page will yield None
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# except that we don't need to prune backlinks from the parent
# because we don't use LXML.
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. The pages comprise the bulk of the
# file, so in practice we prune away enough.
elem.clear()
_extract_pages = extract_pages # for backward compatibility
def process_article(args):
"""
Parse a wikipedia article, returning its content as a list of tokens
(utf8-encoded strings).
"""
text, lemmatize, title, pageid = args
text = filter_wiki(text)
if lemmatize:
result = utils.lemmatize(text)
else:
result = tokenize(text)
return result, title, pageid
class WikiCorpus(TextCorpus):
"""
Treat a wikipedia articles dump (\*articles.xml.bz2) as a (read-only) corpus.
The documents are extracted on-the-fly, so that the whole (massive) dump
can stay compressed on disk.
>>> wiki = WikiCorpus('enwiki-20100622-pages-articles.xml.bz2') # create word->word_id mapping, takes almost 8h
>>> MmCorpus.serialize('wiki_en_vocab200k', wiki) # another 8h, creates a file in MatrixMarket format plus file with id->word
"""
def __init__(self, fname, processes=None, lemmatize=utils.HAS_PATTERN, dictionary=None, filter_namespaces=('0',)):
"""
Initialize the corpus. Unless a dictionary is provided, this scans the
corpus once, to determine its vocabulary.
If `pattern` package is installed, use fancier shallow parsing to get
token lemmas. Otherwise, use simple regexp tokenization. You can override
this automatic logic by forcing the `lemmatize` parameter explicitly.
"""
self.fname = fname
self.filter_namespaces = filter_namespaces
self.metadata = False
if processes is None:
processes = max(1, multiprocessing.cpu_count() - 1)
self.processes = processes
self.lemmatize = lemmatize
if dictionary is None:
self.dictionary = Dictionary(self.get_texts())
else:
self.dictionary = dictionary
def get_texts(self):
"""
Iterate over the dump, returning text version of each article as a list
of tokens.
Only articles of sufficient length are returned (short articles & redirects
etc are ignored).
Note that this iterates over the **texts**; if you want vectors, just use
the standard corpus interface instead of this function::
>>> for vec in wiki_corpus:
>>> print(vec)
"""
articles, articles_all = 0, 0
positions, positions_all = 0, 0
texts = ((text, self.lemmatize, title, pageid) for title, text, pageid in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces))
pool = multiprocessing.Pool(self.processes)
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
ignore_namespaces = 'Wikipedia Category File Portal Template MediaWiki User Help Book Draft'.split()
for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):
for tokens, title, pageid in pool.imap(process_article, group): # chunksize=10):
articles_all += 1
positions_all += len(tokens)
# article redirects and short stubs are pruned here
if len(tokens) < ARTICLE_MIN_WORDS or any(title.startswith(ignore + ':') for ignore in ignore_namespaces):
continue
articles += 1
positions += len(tokens)
if self.metadata:
yield (tokens, (pageid, title))
else:
yield tokens
pool.terminate()
logger.info("finished iterating over Wikipedia corpus of %i documents with %i positions"
" (total %i articles, %i positions before pruning articles shorter than %i words)" %
(articles, positions, articles_all, positions_all, ARTICLE_MIN_WORDS))
self.length = articles # cache corpus length
# endclass WikiCorpus
| gpl-3.0 | 4,490,660,027,610,061,300 | 40.449045 | 145 | 0.619516 | false |
lauralwatkins/voronoi | voronoi/bin2d.py | 1 | 4903 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# VORONOI.BIN2D
# Laura L Watkins [[email protected]]
# - converted from IDL code by Michele Cappellari (voronoi_2d_binning)
# -----------------------------------------------------------------------------
from numpy import *
from matplotlib.pyplot import *
from .weighted_centroid import *
from .bin_roundness import *
from .accretion import *
from .reassign_bad_bins import *
from .cvt_equal_mass import *
from .bin_quantities import *
def bin2d(x, y, signal, noise, targetsn, cvt=True, wvt=False, quiet=True,
graphs=True):
"""
This is the main program that has to be called from external programs.
It simply calls in sequence the different steps of the algorithms
and optionally plots the results at the end of the calculation.
INPUTS
x : x-coordinates of pixels
y : y-coordinates of pixels
signal : signal in pixels
noise : noise in pixels
targetsn : target S/N required
OPTIONS
cvt : use Modified-Lloyd algorithm [default True]
wvt : use additional modification by Diehl & Statler [default False]
quiet : supress output [default True]
graphs : show results graphically [default True]
"""
npix = x.size
if y.size != x.size or signal.size != x.size or noise.size != x.size:
print("ERROR: input vectors (x, y, signal, noise) must have same size")
return
if any(noise < 0):
print("ERROR: noise cannot be negative")
return
# prevent division by zero for pixels with signal=0 and
# noise=sqrt(signal)=0 as can happen with X-ray data
noise[noise==0] = noise[noise>0].min() * 1e-9
# Perform basic tests to catch common input errors
if signal.sum()/sqrt((noise**2).sum()) < targetsn:
print("Not enough S/N in the whole set of pixels. " \
+ "Many pixels may have noise but virtually no signal. " \
+ "They should not be included in the set to bin, " \
+ "or the pixels should be optimally weighted." \
+ "See Cappellari & Copin (2003, Sec.2.1) and README file.")
return
if (signal/noise).min() > targetsn:
print("EXCEPTION: all pixels have enough S/N -- binning not needed")
return
if not quiet: print("Bin-accretion...")
clas = accretion(x, y, signal, noise, targetsn, quiet=quiet)
if not quiet: print("{:} initial bins\n".format(clas.max()))
if not quiet: print("Reassign bad bins...")
xnode, ynode = reassign_bad_bins(x, y, signal, noise, targetsn, clas)
if not quiet: print("{:} good bins\n".format(xnode.size))
if cvt:
if not quiet: print("Modified Lloyd algorithm...")
scale, iters = cvt_equal_mass(x, y, signal, noise, xnode, ynode,
quiet=quiet, wvt=wvt)
if not quiet: print(" iterations: {:}".format(iters-1))
else:
scale = 1.
if not quiet: print("Recompute bin properties...")
clas, xbar, ybar, sn, area = bin_quantities(x, y, signal, noise, xnode,
ynode, scale)
unb = area==1
binned = area!=1
if not quiet: print("Unbinned pixels: {:} / {:}".format(sum(unb), npix))
fracscat = ((sn[binned]-targetsn)/targetsn*100).std()
if not quiet: print("Fractional S/N scatter (%):", fracscat)
if graphs:
# set up plotting
rc("font", family="serif")
rc("text", usetex=True)
rc("xtick", labelsize="8")
rc("ytick", labelsize="8")
rc("axes", labelsize="10")
rc("legend", fontsize="9")
# pixel map
fig = figure(figsize=(4,3))
fig.subplots_adjust(left=0.13, bottom=0.13, top=0.97, right=0.98)
rnd = random.rand(xnode.size).argsort() # randomize bin colors
scatter(x, y, lw=0, c=rnd[clas])
plot(xnode, ynode, "k+", ms=2)
xlim(x.min()-x.ptp()*0.05, x.max()+x.ptp()*0.05)
ylim(y.min()-y.ptp()*0.05, y.max()+y.ptp()*0.05)
xlabel("coordinate 1")
ylabel("coordinate 2")
show()
# signal-to-noise profile
fig = figure(figsize=(4,3))
fig.subplots_adjust(left=0.12, bottom=0.13, top=0.97, right=0.97)
rad = sqrt(xbar**2 + ybar**2) # use centroids, NOT generators
rmin = max(0., rad.min()-rad.ptp()*0.05)
rmax = rad.max()+rad.ptp()*0.05
plot([rmin, rmax], ones(2)*targetsn, c="k", lw=2, alpha=0.8)
scatter(rad[binned], sn[binned], lw=0, c="b", alpha=0.8)
if unb.size > 0: scatter(rad[unb], sn[unb], lw=0, c="r", alpha=0.8)
xlim(rmin, rmax)
ylim(0., sn.max()*1.05)
xlabel(r"$R_{\rm bin}$")
ylabel(r"$SN_{\rm bin}$")
show()
return clas, xnode, ynode, sn, area, scale
| bsd-2-clause | -17,399,896,273,318,952 | 37.015504 | 79 | 0.566796 | false |
sadjadasghari/deeplab4a2d | segmenter.py | 1 | 1421 | #!/usr/bin/env python
# Martin Kersner, [email protected]
# 2016/03/11
# Segmenter is an image segmentation specialization of Net.
# Inspired by https://github.com/torrvision/crfasrnn/blob/master/caffe-crfrnn/python/caffe/segmenter.py
import numpy as np
caffe_root = 'code/'
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
class Segmenter(caffe.Net):
def __init__(self, prototxt, model, gpu_id=-1):
caffe.Net.__init__(self, prototxt, model)
self.set_phase_test()
if gpu_id < 0:
self.set_mode_cpu()
else:
self.set_mode_gpu()
self.set_device(gpu_id)
def predict(self, inputs):
# uses MEMORY_DATA layer for loading images and postprocessing DENSE_CRF layer
img = inputs[0].transpose((2, 0, 1))
img = img[np.newaxis, :].astype(np.float32)
label = np.zeros((1, 1, 1, 1), np.float32)
data_dim = np.zeros((1, 1, 1, 2), np.float32)
data_dim[0][0][0][0] = img.shape[2]
data_dim[0][0][0][1] = img.shape[3]
img = np.ascontiguousarray(img, dtype=np.float32)
label = np.ascontiguousarray(label, dtype=np.float32)
data_dim = np.ascontiguousarray(data_dim, dtype=np.float32)
self.set_input_arrays(img, label, data_dim)
out = self.forward()
predictions = out[self.outputs[0]] # the output layer should be called crf_inf
segm_result = predictions[0].argmax(axis=0).astype(np.uint8)
return segm_result
| gpl-3.0 | 2,710,231,346,983,501,000 | 30.577778 | 103 | 0.665025 | false |
maxamillion/dnf | tests/test_cli.py | 1 | 10559 | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import os
import re
from argparse import Namespace
import dnf.cli.cli
import dnf.conf
import dnf.goal
import dnf.repo
import dnf.repodict
import tests.support
from tests.support import mock
VERSIONS_OUTPUT = """\
Installed: pepper-0:20-0.x86_64 at Thu Jan 1 00:00:00 1970
Built : at Thu Jan 1 00:00:00 1970
Installed: tour-0:5-0.noarch at Thu Jan 1 00:00:00 1970
Built : at Thu Jan 1 00:00:00 1970
"""
class VersionStringTest(tests.support.DnfBaseTestCase):
REPOS = []
def test_print_versions(self):
output = tests.support.MockOutput()
with mock.patch('sys.stdout') as stdout,\
mock.patch('dnf.sack._rpmdb_sack', return_value=self.base.sack):
dnf.cli.cli.print_versions(['pepper', 'tour'], self.base, output)
written = ''.join([mc[1][0] for mc in stdout.method_calls
if mc[0] == 'write'])
self.assertEqual(written, VERSIONS_OUTPUT)
@mock.patch('dnf.cli.cli.logger', new_callable=tests.support.mock_logger)
class BaseCliTest(tests.support.ResultTestCase):
REPOS = ["main", "updates"]
BASE_CLI = True
INIT_SACK = True
def setUp(self):
super(BaseCliTest, self).setUp()
self.base.output.term = tests.support.MockTerminal()
self.base.downgrade_to = mock.Mock(wraps=self.base.downgrade_to)
def test_downgradePkgs(self, logger):
self.base.downgradePkgs(('tour',))
self.assertEqual(self.base.downgrade_to.mock_calls, [mock.call('tour', strict=False)])
self.assertEqual(logger.mock_calls, [])
def test_downgradePkgs_notfound(self, logger):
with self.assertRaises(dnf.exceptions.Error) as ctx:
self.base.downgradePkgs(('non-existent',))
self.assertEqual(str(ctx.exception), 'No packages marked for downgrade.')
self.assertEqual(self.base.downgrade_to.mock_calls,
[mock.call('non-existent', strict=False)])
self.assertEqual(logger.mock_calls,
[mock.call.info('No package %s available.',
'non-existent')])
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
def test_downgradePkgs_notinstalled(self, logger):
tests.support.ObjectMatcher(dnf.package.Package, {'name': 'lotus'})
with self.assertRaises(dnf.exceptions.Error) as ctx:
self.base.downgradePkgs(('lotus',))
self.assertEqual(str(ctx.exception), 'No packages marked for downgrade.')
self.assertEqual(self.base.downgrade_to.mock_calls, [mock.call('lotus', strict=False)])
@mock.patch('dnf.cli.cli.Cli._read_conf_file')
class CliTest(tests.support.DnfBaseTestCase):
REPOS = ["main"]
CLI = "init"
def setUp(self):
super(CliTest, self).setUp()
self.base.output = tests.support.MockOutput()
def test_knows_upgrade(self, _):
upgrade = self.cli.cli_commands['upgrade']
update = self.cli.cli_commands['update']
self.assertIs(upgrade, update)
def test_simple(self, _):
self.assertFalse(self.base.conf.assumeyes)
self.cli.configure(['update', '-y'])
self.assertTrue(self.base.conf.assumeyes)
def test_glob_options_cmds(self, _):
params = [
['install', '-y', 'pkg1', 'pkg2'],
['install', 'pkg1', '-y', 'pkg2'],
['install', 'pkg1', 'pkg2', '-y'],
['-y', 'install', 'pkg1', 'pkg2']
]
for param in params:
self.cli.configure(args=param)
self.assertTrue(self.base.conf.assumeyes)
self.assertEqual(self.cli.command.opts.command, "install")
self.assertEqual(self.cli.command.opts.pkg_specs, ["pkg1", "pkg2"])
def test_configure_repos(self, _):
opts = Namespace()
opts.repo = []
opts.repos_ed = [('*', 'disable'), ('comb', 'enable')]
opts.cacheonly = True
opts.nogpgcheck = True
opts.repofrompath = {}
self.base._repos = dnf.repodict.RepoDict()
self.base._repos.add(tests.support.MockRepo('one', self.base.conf))
self.base._repos.add(tests.support.MockRepo('two', self.base.conf))
self.base._repos.add(tests.support.MockRepo('comb', self.base.conf))
self.cli._configure_repos(opts)
self.assertFalse(self.base.repos['one'].enabled)
self.assertFalse(self.base.repos['two'].enabled)
self.assertTrue(self.base.repos['comb'].enabled)
self.assertFalse(self.base.repos["comb"].gpgcheck)
self.assertFalse(self.base.repos["comb"].repo_gpgcheck)
def test_configure_repos_expired(self, _):
"""Ensure that --cacheonly beats the expired status."""
opts = Namespace()
opts.repo = []
opts.repos_ed = []
opts.cacheonly = True
opts.repofrompath = {}
pers = self.base._repo_persistor
pers.get_expired_repos = mock.Mock(return_value=('one',))
self.base._repos = dnf.repodict.RepoDict()
self.base._repos.add(tests.support.MockRepo('one', self.base.conf))
self.cli._configure_repos(opts)
# _process_demands() should respect --cacheonly in spite of modified demands
self.cli.demands.fresh_metadata = False
self.cli.demands.cacheonly = True
self.cli._process_demands()
self.assertEqual(self.base.repos['one']._repo.getSyncStrategy(),
dnf.repo.SYNC_ONLY_CACHE)
@mock.patch('dnf.logging.Logging._setup', new=mock.MagicMock)
class ConfigureTest(tests.support.DnfBaseTestCase):
REPOS = ["main"]
# CLI = "init"
def setUp(self):
super(ConfigureTest, self).setUp()
self.base._conf = dnf.conf.Conf()
self.base.output = tests.support.MockOutput()
self.base._plugins = mock.Mock()
self.cli = dnf.cli.cli.Cli(self.base)
self.cli.command = mock.Mock()
self.conffile = os.path.join(tests.support.dnf_toplevel(), "etc/dnf/dnf.conf")
@mock.patch('dnf.util.am_i_root', lambda: False)
def test_configure_user(self):
""" Test Cli.configure as user."""
self.base._conf = dnf.conf.Conf()
with mock.patch('dnf.rpm.detect_releasever', return_value=69):
self.cli.configure(['update', '-c', self.conffile])
reg = re.compile('^/var/tmp/dnf-[.a-zA-Z0-9_-]+$')
self.assertIsNotNone(reg.match(self.base.conf.cachedir))
parser = argparse.ArgumentParser()
expected = "%s update -c %s " % (parser.prog, self.conffile)
self.assertEqual(self.cli.cmdstring, expected)
@mock.patch('dnf.util.am_i_root', lambda: True)
def test_configure_root(self):
""" Test Cli.configure as root."""
self.base._conf = dnf.conf.Conf()
with mock.patch('dnf.rpm.detect_releasever', return_value=69):
self.cli.configure(['update', '--nogpgcheck', '-c', self.conffile])
reg = re.compile('^/var/cache/dnf$')
self.assertIsNotNone(reg.match(self.base.conf.system_cachedir))
parser = argparse.ArgumentParser()
expected = "%s update --nogpgcheck -c %s " % (parser.prog, self.conffile)
self.assertEqual(self.cli.cmdstring, expected)
def test_configure_verbose(self):
with mock.patch('dnf.rpm.detect_releasever', return_value=69):
self.cli.configure(['-v', 'update', '-c', self.conffile])
parser = argparse.ArgumentParser()
expected = "%s -v update -c %s " % (parser.prog, self.conffile)
self.assertEqual(self.cli.cmdstring, expected)
self.assertEqual(self.base.conf.debuglevel, 6)
self.assertEqual(self.base.conf.errorlevel, 6)
@mock.patch('dnf.cli.cli.Cli._parse_commands', new=mock.MagicMock)
@mock.patch('os.path.exists', return_value=True)
def test_conf_exists_in_installroot(self, ospathexists):
with mock.patch('logging.Logger.warning'), \
mock.patch('dnf.rpm.detect_releasever', return_value=69):
self.cli.configure(['--installroot', '/roots/dnf', 'update'])
self.assertEqual(self.base.conf.config_file_path, '/roots/dnf/etc/dnf/dnf.conf')
self.assertEqual(self.base.conf.installroot, '/roots/dnf')
@mock.patch('dnf.cli.cli.Cli._parse_commands', new=mock.MagicMock)
@mock.patch('os.path.exists', return_value=False)
def test_conf_notexists_in_installroot(self, ospathexists):
with mock.patch('dnf.rpm.detect_releasever', return_value=69):
self.cli.configure(['--installroot', '/roots/dnf', 'update'])
self.assertEqual(self.base.conf.config_file_path, '/etc/dnf/dnf.conf')
self.assertEqual(self.base.conf.installroot, '/roots/dnf')
@mock.patch('dnf.cli.cli.Cli._parse_commands', new=mock.MagicMock)
def test_installroot_with_etc(self):
"""Test that conffile is detected in a new installroot."""
self.base.extcmds = []
tlv = tests.support.dnf_toplevel()
self.cli.configure(['--installroot', tlv, 'update'])
self.assertEqual(self.base.conf.config_file_path, '%s/etc/dnf/dnf.conf' % tlv)
def test_installroot_configurable(self):
"""Test that conffile is detected in a new installroot."""
conf = os.path.join(tests.support.dnf_toplevel(), "tests/etc/installroot.conf")
self.cli.configure(['-c', conf, '--nogpgcheck', '--releasever', '17', 'update'])
self.assertEqual(self.base.conf.installroot, '/roots/dnf')
| gpl-2.0 | 6,050,709,037,758,982,000 | 40.900794 | 95 | 0.641538 | false |
pombredanne/invenio-old | modules/webaccess/lib/access_control_firerole.py | 3 | 14461 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio Access Control FireRole."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
"""These functions are for realizing a firewall like role definition for extending
webaccess to connect user to roles using every infos about users.
"""
import re
import cPickle
from zlib import compress, decompress
import sys
if sys.hexversion < 0x2040000:
# pylint: disable-msg=W0622
from sets import Set as set
# pylint: enable-msg=W0622
from invenio.webgroup_dblayer import get_users_in_group, get_group_id
from invenio.access_control_config import InvenioWebAccessFireroleError
from invenio.dbquery import run_sql, blob_to_string
from invenio.config import CFG_CERN_SITE
from invenio.access_control_config import CFG_ACC_EMPTY_ROLE_DEFINITION_SRC, \
CFG_ACC_EMPTY_ROLE_DEFINITION_SER, CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
# INTERFACE
def compile_role_definition(firerole_def_src):
""" Given a text in which every row contains a rule it returns the compiled
object definition.
Rules have the following syntax:
allow|deny [not] field {list of one or more (double)quoted string or regexp}
or allow|deny any
Every row may contain a # sign followed by a comment, which is discarded.
Field could be any key contained in a user_info dictionary. If the key does
not exist in the dictionary, the rule is skipped.
The first rule which matches determines the result.
"""
line = 0
ret = []
default_allow_p = False
suggest_apache_p = False
if not firerole_def_src or not firerole_def_src.strip():
firerole_def_src = CFG_ACC_EMPTY_ROLE_DEFINITION_SRC
for row in firerole_def_src.split('\n'):
line += 1
row = row.strip()
if not row:
continue
clean_row = _no_comment_re.sub('', row)
if clean_row:
g = _any_rule_re.match(clean_row)
if g:
default_allow_p = g.group('command').lower() == 'allow'
break
g = _rule_re.match(clean_row)
if g:
allow_p = g.group('command').lower() == 'allow'
not_p = g.group('not') != None
field = g.group('field').lower()
# Renaming groups to group and apache_groups to apache_group
for alias_item in _aliasTable:
if field in alias_item:
field = alias_item[0]
break
if field.startswith('precached_'):
raise InvenioWebAccessFireroleError, "Error while compiling rule %s (line %s): %s is a reserved key and can not be used in FireRole rules!" % (row, line, field)
expressions = g.group('expression')+g.group('more_expressions')
expressions_list = []
for expr in _expressions_re.finditer(expressions):
expr = expr.group()
if expr[0] == '/':
try:
expressions_list.append((True, re.compile(expr[1:-1], re.I)))
except Exception, msg:
raise InvenioWebAccessFireroleError, "Syntax error while compiling rule %s (line %s): %s is not a valid re because %s!" % (row, line, expr, msg)
else:
if field == 'remote_ip' and '/' in expr[1:-1]:
try:
expressions_list.append((False, _ip_matcher_builder(expr[1:-1])))
except Exception, msg:
raise InvenioWebAccessFireroleError, "Syntax error while compiling rule %s (line %s): %s is not a valid ip group because %s!" % (row, line, expr, msg)
else:
expressions_list.append((False, expr[1:-1]))
expressions_list = tuple(expressions_list)
if field in ('apache_group', 'apache_user'):
suggest_apache_p = True
ret.append((allow_p, not_p, field, expressions_list))
else:
raise InvenioWebAccessFireroleError, "Syntax error while compiling rule %s (line %s): not a valid rule!" % (row, line)
return (default_allow_p, suggest_apache_p, tuple(ret))
def repair_role_definitions():
""" Try to rebuild compiled serialized definitions from their respectives
sources. This is needed in case Python break back compatibility.
"""
definitions = run_sql("SELECT id, firerole_def_src FROM accROLE")
for role_id, firerole_def_src in definitions:
run_sql("UPDATE accROLE SET firerole_def_ser=%s WHERE id=%s", (serialize(compile_role_definition(firerole_def_src)), role_id))
def store_role_definition(role_id, firerole_def_ser, firerole_def_src):
""" Store a compiled serialized definition and its source in the database
alongside the role to which it belongs.
@param role_id: the role_id
@param firerole_def_ser: the serialized compiled definition
@param firerole_def_src: the sources from which the definition was taken
"""
run_sql("UPDATE accROLE SET firerole_def_ser=%s, firerole_def_src=%s WHERE id=%s", (firerole_def_ser, firerole_def_src, role_id))
def load_role_definition(role_id):
""" Load the definition corresponding to a role. If the compiled definition
is corrupted it try to repairs definitions from their sources and try again
to return the definition.
@param role_id:
@return: a deserialized compiled role definition
"""
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
try:
return deserialize(res[0][0])
except Exception:
## Something bad might have happened? (Update of Python?)
repair_role_definitions()
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
return deserialize(res[0][0])
return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
def acc_firerole_suggest_apache_p(firerole_def_obj):
"""Return True if the given firerole definition suggest the authentication
through Apache."""
try:
default_allow_p, suggest_apache_p, rules = firerole_def_obj
return suggest_apache_p
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
def acc_firerole_extract_emails(firerole_def_obj):
"""
Best effort function to extract all the possible email addresses
authorized by the given firerole.
"""
authorized_emails = set()
try:
default_allow_p, suggest_apache_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
if not_p:
continue
if field == 'group':
for reg_p, expr in expressions_list:
if reg_p:
continue
if CFG_CERN_SITE and expr.endswith(' [CERN]'):
authorized_emails.add(expr[:len(' [CERN]')].lower().strip() + '@cern.ch')
emails = run_sql("SELECT user.email FROM usergroup JOIN user_usergroup ON usergroup.id=user_usergroup.id_usergroup JOIN user ON user.id=user_usergroup.id_user WHERE usergroup.name=%s", (expr, ))
for email in emails:
authorized_emails.add(email[0].lower().strip())
elif field == 'email':
for reg_p, expr in expressions_list:
if reg_p:
continue
authorized_emails.add(expr.lower().strip())
elif field == 'uid':
for reg_p, expr in expressions_list:
if reg_p:
continue
email = run_sql("SELECT email FROM user WHERE id=%s", (expr, ))
if email:
authorized_emails.add(email[0][0].lower().strip())
return authorized_emails
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
def acc_firerole_check_user(user_info, firerole_def_obj):
""" Given a user_info dictionary, it matches the rules inside the deserializez
compiled definition in order to discover if the current user match the roles
corresponding to this definition.
@param user_info: a dict produced by collect_user_info which contains every
info about a user
@param firerole_def_obj: a compiled deserialized definition produced by
compile_role_defintion
@return: True if the user match the definition, False otherwise.
"""
try:
default_allow_p, suggest_apache_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
group_p = field in ['group', 'apache_group'] # Is it related to group?
ip_p = field == 'remote_ip' # Is it related to Ips?
next_rule_p = False # Silly flag to break 2 for cycle
if not user_info.has_key(field):
continue
for reg_p, expr in expressions_list: # For every element in the rule
if group_p: # Special case: groups
if reg_p: # When it is a regexp
for group in user_info[field]: # iterate over every group
if expr.match(group): # if it matches
if not_p: # if must not match
next_rule_p = True # let's skip to next rule
break
else: # Ok!
return allow_p
if next_rule_p:
break # I said: let's skip to next rule ;-)
elif expr.lower() in [group.lower() for group in user_info[field]]: # Simple expression then just check for expr in groups
if not_p: # If expr is in groups then if must not match
break # let's skip to next rule
else: # Ok!
return allow_p
elif reg_p: # Not a group, then easier. If it's a regexp
if expr.match(user_info[field]): # if it matches
if not_p: # If must not match
break # Let's skip to next rule
else:
return allow_p # Ok!
elif ip_p and type(expr) == type(()): # If it's just a simple expression but an IP!
if _ipmatch(user_info['remote_ip'], expr): # Then if Ip matches
if not_p: # If must not match
break # let's skip to next rule
else:
return allow_p # ok!
elif expr.lower() == user_info[field].lower(): # Finally the easiest one!!
if not_p: # ...
break
else: # ...
return allow_p # ...
if not_p and not next_rule_p: # Nothing has matched and we got not
return allow_p # Then the whole rule matched!
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
return default_allow_p # By default we allow ;-) it'an OpenSource project
def serialize(firerole_def_obj):
""" Serialize and compress a definition."""
if firerole_def_obj == CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ:
return CFG_ACC_EMPTY_ROLE_DEFINITION_SER
elif firerole_def_obj:
return compress(cPickle.dumps(firerole_def_obj, -1))
else:
return CFG_ACC_EMPTY_ROLE_DEFINITION_SER
def deserialize(firerole_def_ser):
""" Deserialize and decompress a definition."""
if firerole_def_ser:
return cPickle.loads(decompress(blob_to_string(firerole_def_ser)))
else:
return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
# IMPLEMENTATION
# Comment finder
_no_comment_re = re.compile(r'[\s]*(?<!\\)#.*')
# Rule dissecter
_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(?:(?P<not>not)[\s]+)?(?P<field>[\w]+)[\s]+(?P<expression>(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/)(?P<more_expressions>([\s]*,[\s]*((?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/))*)(?:[\s]*(?<!\\).*)?', re.I)
_any_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(any|all)[\s]*', re.I)
# Sub expression finder
_expressions_re = re.compile(r'(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/')
def _mkip (ip):
""" Compute a numerical value for a dotted IP """
num = 0L
for i in map (int, ip.split ('.')):
num = (num << 8) + i
return num
_full = 2L ** 32 - 1
_aliasTable = (('group', 'groups'), ('apache_group', 'apache_groups'))
def _ip_matcher_builder(group):
""" Compile a string "ip/bitmask" (i.e. 127.0.0.0/24)
@param group: a classical "ip/bitmask" string
@return: a tuple containing the gip and mask in a binary version.
"""
gip, gmk = group.split('/')
gip = _mkip(gip)
gmk = int(gmk)
mask = (_full - (2L ** (32 - gmk) - 1))
if not (gip & mask == gip):
raise InvenioWebAccessFireroleError, "Netmask does not match IP (%Lx %Lx)" % (gip, mask)
return (gip, mask)
def _ipmatch(ip, ip_matcher):
""" Check if an ip matches an ip_group.
@param ip: the ip to check
@param ip_matcher: a compiled ip_group produced by ip_matcher_builder
@return: True if ip matches, False otherwise
"""
return _mkip(ip) & ip_matcher[1] == ip_matcher[0]
| gpl-2.0 | 5,335,946,858,498,810,000 | 44.762658 | 306 | 0.577415 | false |
XiaoxiaoLiu/morphology_analysis | bigneuron/reor_data_for_tile_vis.py | 1 | 2826 | import pandas as pd
import os
import sys
import platform
if (platform.system() == "Linux"):
WORK_PATH = "/local1/xiaoxiaol/work"
else:
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
import pandas as pd
import numpy as np
import os
import blast_neuron.blast_neuron_comp as bn
data_DIR = "/data/mat/xiaoxiaol/data/big_neuron/silver"
output_dir = "/data/mat/xiaoxiaol/data/big_neuron/silver/0401_gold163_all_soma_sort"
os.system("mkdir "+output_dir)
neuron_distance_csv = "/data/mat/xiaoxiaol/data/big_neuron/silver/20160113_merged_gold_gt/neuron_distances_with_gold.csv"
#num_of_selected_swc = 14
#df_image_location = pd.read_csv('/data/mat/xiaoxiaol/data/Hanchuan_curated/image_file_location_checkup.csv')
df_image_location = pd.read_csv('/home/xiaoxiaol/work/data/Hanchuan_curated/image_file_location_checkup.csv')
keys = df_image_location['image_file_name']
values = df_image_location['file_path']
image_checkup = dict(zip(keys, values))
df_nd = pd.read_csv(neuron_distance_csv)
#merge with the gold79 subset
#df_79 = pd.read_csv('/Users/xiaoxiaoliu/work/data/gold79/gold.csv')
#images = np.unique(df_79['image_file_name'])
#print images.size
images = np.unique(df_nd['image_file_name'])
dfg = df_nd.groupby('image_file_name')
df_ims=pd.DataFrame()
for im in images:
df_image = dfg.get_group(im)
df_ims=df_ims.append(df_image,ignore_index=True)
#print df_image['swc_file']
#sort by distance
#df_image.sort_values(['neuron_distance'], ascending=[1], inplace=True)
#print df_image['swc_file']
tmp = df_image.iloc[0]['swc_file']
im_id = tmp.split('/')[-2] # 2.v3dpbd
# ano_file= im_id+".recons.ano"
# median_log = im_id+".recons.ano.median.log
# median_fn = bn.read_median_swc_log(ano_file, median_log)
# print median_fn
out_dir = output_dir + '/' + im_id.split('.')[0]
#if not os.path.exists(out_dir):
# os.mkdir(out_dir)
gold_swc = df_image.iloc[0]['gold_swc_file']
image_file = image_checkup[im]
#print image_file
output_swc = out_dir+'/00_'+gold_swc.split('/')[-1]
#os.system("cp "+gold_swc + " "+ output_swc)
output_image = out_dir +'/'+im
#copy image
os.system("rm "+output_image)
os.system("cp -f "+image_file + " "+ output_image)
i=1
for swc_file in df_image['swc_file']:
string=str(i)
if i < 10:
string = '0'+str(i)
out_swc = out_dir +'/' + string +'_'+ swc_file.split('/')[-1]
#os.system("cp "+ swc_file + " "+ out_swc)
i=i+1
#bn.genLinkerFile( out_dir, out_dir+"/"+im_id+'.ano')
#df_ims.to_csv(data_DIR+"/gold_trainning_subset/neuron_distances.csv")
#print df_ims.algorithm
#print df_ims.image_file_name
| gpl-3.0 | -2,709,689,753,089,138,700 | 24.926606 | 121 | 0.63942 | false |
anryko/ansible | lib/ansible/modules/cloud/amazon/s3_logging.py | 13 | 5376 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: s3_logging
short_description: Manage logging facility of an s3 bucket in AWS
description:
- Manage logging facility of an s3 bucket in AWS
version_added: "2.0"
author: Rob White (@wimnat)
options:
name:
description:
- "Name of the s3 bucket."
required: true
type: str
state:
description:
- "Enable or disable logging."
default: present
choices: [ 'present', 'absent' ]
type: str
target_bucket:
description:
- "The bucket to log to. Required when state=present."
type: str
target_prefix:
description:
- "The prefix that should be prepended to the generated log files written to the target_bucket."
default: ""
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
s3_logging:
name: mywebsite.com
target_bucket: mylogs
target_prefix: logs/mywebsite.com
state: present
- name: Remove logging on an s3 bucket
s3_logging:
name: mywebsite.com
state: absent
'''
try:
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.exception import S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
def compare_bucket_logging(bucket, target_bucket, target_prefix):
bucket_log_obj = bucket.get_logging_status()
if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix:
return False
else:
return True
def enable_bucket_logging(connection, module):
bucket_name = module.params.get("name")
target_bucket = module.params.get("target_bucket")
target_prefix = module.params.get("target_prefix")
changed = False
try:
bucket = connection.get_bucket(bucket_name)
except S3ResponseError as e:
module.fail_json(msg=e.message)
try:
if not compare_bucket_logging(bucket, target_bucket, target_prefix):
# Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
try:
target_bucket_obj = connection.get_bucket(target_bucket)
except S3ResponseError as e:
if e.status == 301:
module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
else:
module.fail_json(msg=e.message)
target_bucket_obj.set_as_logging_target()
bucket.enable_logging(target_bucket, target_prefix)
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def disable_bucket_logging(connection, module):
bucket_name = module.params.get("name")
changed = False
try:
bucket = connection.get_bucket(bucket_name)
if not compare_bucket_logging(bucket, None, None):
bucket.disable_logging()
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
target_bucket=dict(required=False, default=None),
target_prefix=dict(required=False, default=""),
state=dict(required=False, default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
try:
connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
state = module.params.get("state")
if state == 'present':
enable_bucket_logging(connection, module)
elif state == 'absent':
disable_bucket_logging(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,811,785,204,557,483,000 | 29.202247 | 134 | 0.656994 | false |
edx/edxanalytics | src/edx_embed/edx_embed/wsgi.py | 1 | 1140 | """
WSGI config for edx_embed project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edx_embed.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| agpl-3.0 | 1,823,118,006,196,194,600 | 39.714286 | 79 | 0.799123 | false |
openthread/silk | silk/node/openthread_sniffer.py | 1 | 3372 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module requires that the OpenThread spinel-cli tools are installed
$git clone https://github.com/openthread/pyspinel.git
$cd pyspinel
$sudo python setup.py develop
#$ which sniffer.py (should show up in /usr/local/bin)
You now have two options
$ sudo visudo
Option 1: Add /usr/local/bin to your secure path
Option 2: Create a symlink from a secure path location to the sniffer.py
you found above
This module makes subprocess calls out to sniffer.py to generate packet
captures.
"""
import os
import subprocess
from silk.node.sniffer_base import SnifferNode
import silk.hw.hw_resource as hwr
sniffer_py_path = None
class OpenThreadSniffer(SnifferNode):
def __init__(self):
super().__init__()
self.sniffer_process = None
self.output_path = None
self.outfile = None
self.channel = None
self.fragment_count = 0
global sniffer_py_path
try:
sniffer_py_path = subprocess.check_output(["which", "sniffer.py"]).strip()
except Exception as error:
self.logger.debug("Error getting sniffer.py path: %s" % error)
sniffer_py_path = "/usr/local/bin/sniffer.py"
self.device = hwr.global_instance().get_hw_module(self._hw_model)
def set_up(self):
pass
def tear_down(self):
self.stop()
hwr.global_instance().free_hw_module(self.device)
def start(self, channel, output_path):
self.channel = channel
sniffer_args = [
sniffer_py_path, "-c",
str(channel), "-n 1", "--crc", "-b 115200", "--no-reset", "-u",
self.device.port()
]
self.output_path = os.path.join(output_path, "thread_channel_%s.pcap" % channel)
self.outfile = open(self.output_path, "wb")
self.sniffer_process = subprocess.Popen(sniffer_args, bufsize=0, stdout=self.outfile)
def restart(self):
if self.sniffer_process is not None:
return
self.fragment_count += 1
output_name = os.path.splitext(self.output_path)
self.outfile = open(output_name[0] + "_fragment_{0}".format(self.fragment_count) + output_name[1], "wb")
sniffer_args = [sniffer_py_path, "-c", str(self.channel), "-u", self.device.port()]
self.sniffer_process = subprocess.Popen(sniffer_args, bufsize=0, stdout=self.outfile)
def stop(self):
if self.sniffer_process is not None:
self.sniffer_process.kill()
if self.outfile is not None:
self.outfile.close()
self.sniffer_process = None
self.outfile = None
def get_stats(self):
self.logger.debug("No stats for OpenThread.")
class NordicSniffer(OpenThreadSniffer):
_hw_model = "NordicSniffer"
| apache-2.0 | 5,528,026,267,215,853,000 | 30.514019 | 112 | 0.649466 | false |
vit2/vit-e2 | lib/python/Plugins/SystemPlugins/HdmiCEC/plugin.py | 8 | 5597 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, getConfigListEntry
from Components.Sources.StaticText import StaticText
class HdmiCECSetupScreen(Screen, ConfigListScreen):
skin = """
<screen position="c-300,c-250" size="600,500" title="HDMI CEC setup">
<widget name="config" position="25,25" size="550,350" />
<widget source="current_address" render="Label" position="25,375" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
<widget source="fixed_address" render="Label" position="25,405" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
<ePixmap pixmap="skin_default/buttons/red.png" position="20,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="160,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="300,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="440,e-45" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="20,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="160,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="300,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="440,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
</screen>"""
def __init__(self, session):
self.skin = HdmiCECSetupScreen.skin
Screen.__init__(self, session)
from Components.ActionMap import ActionMap
from Components.Button import Button
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText(_("Set fixed"))
self["key_blue"] = StaticText(_("Clear fixed"))
self["current_address"] = StaticText()
self["fixed_address"] = StaticText()
self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keyGo,
"cancel": self.keyCancel,
"green": self.keyGo,
"red": self.keyCancel,
"yellow": self.setFixedAddress,
"blue": self.clearFixedAddress,
"menu": self.closeRecursive,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
self.list.append(getConfigListEntry(_("Enabled"), config.hdmicec.enabled))
self.list.append(getConfigListEntry(_("Put TV in standby"), config.hdmicec.control_tv_standby))
self.list.append(getConfigListEntry(_("Wakeup TV from standby"), config.hdmicec.control_tv_wakeup))
self.list.append(getConfigListEntry(_("Regard deep standby as standby"), config.hdmicec.handle_deepstandby_events))
self.list.append(getConfigListEntry(_("Switch TV to correct input"), config.hdmicec.report_active_source))
self.list.append(getConfigListEntry(_("Use TV remote control"), config.hdmicec.report_active_menu))
self.list.append(getConfigListEntry(_("Handle standby from TV"), config.hdmicec.handle_tv_standby))
self.list.append(getConfigListEntry(_("Handle wakeup from TV"), config.hdmicec.handle_tv_wakeup))
self.list.append(getConfigListEntry(_("Wakeup signal from TV"), config.hdmicec.tv_wakeup_detection))
self.list.append(getConfigListEntry(_("Forward volume keys"), config.hdmicec.volume_forwarding))
self.list.append(getConfigListEntry(_("Put receiver in standby"), config.hdmicec.control_receiver_standby))
self.list.append(getConfigListEntry(_("Wakeup receiver from standby"), config.hdmicec.control_receiver_wakeup))
self.list.append(getConfigListEntry(_("Minimum send interval"), config.hdmicec.minimum_send_interval))
self["config"].list = self.list
self["config"].l.setList(self.list)
self.updateAddress()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def keyGo(self):
for x in self["config"].list:
x[1].save()
self.close()
def keyCancel(self):
for x in self["config"].list:
x[1].cancel()
self.close()
def setFixedAddress(self):
import Components.HdmiCec
Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress(Components.HdmiCec.hdmi_cec.getPhysicalAddress())
self.updateAddress()
def clearFixedAddress(self):
import Components.HdmiCec
Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress("0.0.0.0")
self.updateAddress()
def updateAddress(self):
import Components.HdmiCec
self["current_address"].setText(_("Current CEC address") + ": " + Components.HdmiCec.hdmi_cec.getPhysicalAddress())
if config.hdmicec.fixed_physical_address.value == "0.0.0.0":
fixedaddresslabel = ""
else:
fixedaddresslabel = _("Using fixed address") + ": " + config.hdmicec.fixed_physical_address.value
self["fixed_address"].setText(fixedaddresslabel)
def main(session, **kwargs):
session.open(HdmiCECSetupScreen)
def Plugins(**kwargs):
from os import path
if path.exists("/dev/hdmi_cec") or path.exists("/dev/misc/hdmi_cec0"):
import Components.HdmiCec
from Plugins.Plugin import PluginDescriptor
return [PluginDescriptor(name = "HDMI CEC setup", description = _("Adjust HDMI CEC settings"), where = PluginDescriptor.WHERE_PLUGINMENU, fnc = main)]
return []
| gpl-2.0 | -6,350,070,534,226,412,000 | 48.530973 | 187 | 0.729677 | false |
adminrt/ssbc | search/management/commands/loadlist.py | 36 | 1208 | #coding: utf8
from django.core.management.base import BaseCommand
from django import db as ddb
from search.models import FileList
import pymongo
import json
import binascii
db = pymongo.MongoClient().dht
class Command(BaseCommand):
def handle(self, *args, **options):
#FileList.objects.all().delete()
print 'inputing ...'
total = db.filelist.count()
ii = 0
ready = []
for x in db.filelist.find():
ii += 1
if ii % 200 == 0:
try:
FileList.objects.bulk_create(ready)
except:
for r in ready:
try:
r.save()
except:
import traceback
traceback.print_exc()
ready = []
if ii % 10000 == 0:
print ii * 100 / total, '%', total - ii
ddb.reset_queries()
h = FileList()
h.info_hash = binascii.hexlify(x['_id'])
h.file_list = json.dumps(x['files'])
ready.append(h)
if ready:
FileList.objects.bulk_create(ready)
| gpl-2.0 | -6,755,880,455,603,540,000 | 27.761905 | 55 | 0.470199 | false |
lclchen/open-hackathon | open-hackathon-server/src/hackathon/expr/expr_starter.py | 3 | 4979 | # -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
from hackathon import Component, RequiredFeature, Context
from hackathon.hmongo.models import Experiment
from hackathon.constants import EStatus, VEStatus
__all__ = ["ExprStarter"]
class ExprStarter(Component):
"""Base for experiment starter"""
template_library = RequiredFeature("template_library")
def start_expr(self, context):
"""To start a new Experiment asynchronously
:type context: Context
:param context: the execution context.
"""
expr = Experiment(status=EStatus.INIT,
template=context.template,
user=context.user,
virtual_environments=[],
hackathon=context.hackathon)
expr.save()
template_content = self.template_library.load_template(context.template)
expr.status = EStatus.STARTING
expr.save()
# context contains complex object, we need create another serializable one with only simple fields
new_context = Context(template_content=template_content,
template_name=context.template.name,
hackathon_id=context.hackathon.id,
experiment_id=expr.id)
if context.get("user", None):
new_context.user_id = context.user.id
self._internal_start_expr(new_context)
new_context.experiment = expr
return new_context
def stop_expr(self, context):
"""Stop experiment asynchronously
:type context: Context
:param context: the execution context.
"""
return self._internal_stop_expr(context)
def rollback(self, context):
"""cancel/rollback a expr which is in error state
:type context: Context
:param context: the execution context.
"""
return self._internal_rollback(context)
def _internal_start_expr(self, context):
raise NotImplementedError()
def _internal_stop_expr(self, context):
raise NotImplementedError()
def _internal_rollback(self, context):
raise NotImplementedError()
def _on_virtual_environment_failed(self, context):
self.rollback(context)
def _on_virtual_environment_success(self, context):
expr = Experiment.objects(id=context.experiment_id).no_dereference() \
.only("status", "virtual_environments").first()
if all(ve.status == VEStatus.RUNNING for ve in expr.virtual_environments):
expr.status = EStatus.RUNNING
expr.save()
self._on_expr_started(context)
self._hooks_on_virtual_environment_success(context)
def _on_virtual_environment_stopped(self, context):
expr = Experiment.objects(id=context.experiment_id).no_dereference() \
.only("status", "virtual_environments").first()
ve = expr.virtual_environments.get(name=context.virtual_environment_name)
ve.status = VEStatus.STOPPED
if all(ve.status == VEStatus.STOPPED for ve in expr.virtual_environments):
expr.status = EStatus.STOPPED
expr.save()
def _on_virtual_environment_unexpected_error(self, context):
self.log.warn("experiment unexpected error: " + context.experiment_id)
expr = Experiment.objects(id=context.experiment_id).no_dereference() \
.only("status", "virtual_environments").first()
if "virtual_environment_name" in context:
expr.virtual_environments.get(name=context.virtual_environment_name).status = VEStatus.UNEXPECTED_ERROR
expr.save()
def _hooks_on_virtual_environment_success(self, context):
pass
def _on_expr_started(self, context):
# send notice
pass
| mit | -2,491,003,159,783,175,000 | 36.149254 | 115 | 0.668943 | false |
jermowery/xos | xos/xos/config.py | 2 | 7526 | #!/usr/bin/env python
import sys
import os
import time
import ConfigParser
import tempfile
import codecs
from StringIO import StringIO
from xml_util import Xml
default_config = \
"""
"""
XOS_DIR = "/opt/xos"
DEFAULT_CONFIG_FN = os.path.join(XOS_DIR, "xos_configuration/")
# warning for now, remove once we're sure everyone has made the change
if (os.path.exists("/opt/planetstack/plstackapi_config") and (not os.path.exists(DEFAULT_CONFIG_FN))):
print >> sys.stderr, "WARNING: did you forget to rename plstackapi_config to xos_config ??"
def isbool(v):
return v.lower() in ("true", "false")
def str2bool(v):
return v.lower() in ("true", "1")
# allow the test framework to apply global overrides to the config framework
override = {}
def set_override(name, value):
override[name] = value
class Config:
def __init__(self, config_file=None):
if (config_file==None):
config_file = self.get_config_fn()
self._files = []
self.config_path = os.path.dirname(config_file)
self.config = ConfigParser.ConfigParser()
self.filename = config_file
if not os.path.isfile(self.filename) and not os.path.isdir(self.filename):
self.create(self.filename)
self.load(self.filename)
def get_config_fn(self):
# Look for "-C <something>" to get the
# name of the config file. Using a real OptionParser here is
# problematic as it will throw 'no such option' errors for options
# that it does not understand.
last = None
for arg in sys.argv:
if (last=="-C"):
return arg
last = arg
return DEFAULT_CONFIG_FN
def _header(self):
header = """
DO NOT EDIT. This file was automatically generated at
%s from:
%s
""" % (time.asctime(), os.linesep.join(self._files))
# Get rid of the surrounding newlines
return header.strip().split(os.linesep)
def create(self, filename):
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
configfile = open(filename, 'w')
configfile.write(default_config)
configfile.close()
def load(self, filename):
if filename:
try:
if os.path.isdir(filename):
config_list = list(reversed(os.listdir(filename)))
config_list.remove('README.md')
config_list = [os.path.join(filename, s) for s in config_list]
self.config.read(config_list)
else:
self.config.read(filename)
except ConfigParser.MissingSectionHeaderError:
if filename.endswith('.xml'):
self.load_xml(filename)
else:
self.load_shell(filename)
self._files.append(filename)
self.set_attributes()
def load_xml(self, filename):
xml = XML(filename)
categories = xml.xpath('//configuration/variables/category')
for category in categories:
section_name = category.get('id')
if not self.config.has_section(section_name):
self.config.add_section(section_name)
options = category.xpath('./variablelist/variable')
for option in options:
option_name = option.get('id')
value = option.xpath('./value')[0].text
if not value:
value = ""
self.config.set(section_name, option_name, value)
def load_shell(self, filename):
f = open(filename, 'r')
for line in f:
try:
if line.startswith('#'):
continue
parts = line.strip().split("=")
if len(parts) < 2:
continue
option = parts[0]
value = parts[1].replace('"', '').replace("'","")
section, var = self.locate_varname(option, strict=False)
if section and var:
self.set(section, var, value)
except:
pass
f.close()
def locate_varname(self, varname, strict=True):
varname = varname.lower()
sections = self.config.sections()
section_name = ""
var_name = ""
for section in sections:
if varname.startswith(section.lower()) and len(section) > len(section_name):
section_name = section.lower()
var_name = varname.replace(section_name, "")[1:]
if strict and not self.config.has_option(section_name, var_name):
raise ConfigParser.NoOptionError(var_name, section_name)
return (section_name, var_name)
def set_attributes(self):
sections = self.config.sections()
for section in sections:
for item in self.config.items(section):
name = "%s_%s" % (section, item[0])
value = item[1]
if isbool(value):
value = str2bool(value)
elif value.isdigit():
value = int(value)
setattr(self, name, value)
setattr(self, name.upper(), value)
def verify(self, config1, config2, validate_method):
return True
def validate_type(self, var_type, value):
return True
@staticmethod
def is_xml(config_file):
try:
x = Xml(config_file)
return True
except:
return False
@staticmethod
def is_ini(config_file):
try:
c = ConfigParser.ConfigParser()
c.read(config_file)
return True
except ConfigParser.MissingSectionHeaderError:
return False
def dump(self, sections = []):
sys.stdout.write(output_python())
def output_python(self, encoding = "utf-8"):
buf = codecs.lookup(encoding)[3](StringIO())
buf.writelines(["# " + line + os.linesep for line in self._header()])
for section in self.sections():
buf.write("[%s]%s" % (section, os.linesep))
for (name,value) in self.items(section):
buf.write("%s=%s%s" % (name,value,os.linesep))
buf.write(os.linesep)
return buf.getvalue()
def output_shell(self, show_comments = True, encoding = "utf-8"):
"""
Return variables as a shell script.
"""
buf = codecs.lookup(encoding)[3](StringIO())
buf.writelines(["# " + line + os.linesep for line in self._header()])
for section in self.sections():
for (name,value) in self.items(section):
# bash does not have the concept of NULL
if value:
option = "%s_%s" % (section.upper(), name.upper())
if isbool(value):
value = str(str2bool(value))
elif not value.isdigit():
value = '"%s"' % value
buf.write(option + "=" + value + os.linesep)
return buf.getvalue()
def output_php(self, encoding = "utf-8"):
"""
Return variables as a PHP script.
"""
buf = codecs.lookup(encoding)[3](StringIO())
buf.write("<?php" + os.linesep)
buf.writelines(["// " + line + os.linesep for line in self._header()])
for section in self.sections():
for (name,value) in self.items(section):
option = "%s_%s" % (section, name)
buf.write(os.linesep)
buf.write("// " + option + os.linesep)
if value is None:
value = 'NULL'
buf.write("define('%s', %s);" % (option, value) + os.linesep)
buf.write("?>" + os.linesep)
return buf.getvalue()
def output_xml(self, encoding = "utf-8"):
pass
def output_variables(self, encoding="utf-8"):
"""
Return list of all variable names.
"""
buf = codecs.lookup(encoding)[3](StringIO())
for section in self.sections():
for (name,value) in self.items(section):
option = "%s_%s" % (section,name)
buf.write(option + os.linesep)
return buf.getvalue()
pass
def write(self, filename=None):
if not filename:
filename = self.filename
configfile = open(filename, 'w')
self.config.write(configfile)
def save(self, filename=None):
self.write(filename)
def __getattr__(self, attr):
if attr in override:
return override[attr]
return getattr(self.config, attr)
if __name__ == '__main__':
filename = None
if len(sys.argv) > 1:
filename = sys.argv[1]
config = Config(filename)
else:
config = Config()
config.dump()
| apache-2.0 | -5,964,964,239,220,960,000 | 26.268116 | 102 | 0.645894 | false |
Alwnikrotikz/visvis | core/axises.py | 3 | 80699 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" Module axises
Defines the Axis wobject class to draw tickmarks and lines for each
dimension.
I chose to name this module using an awkward plural to avoid a name clash
with the axis() function.
"""
# todo: split in multiple modules axis_base axis_2d, axis_3d, axis_polar
import OpenGL.GL as gl
import OpenGL.GLU as glu
import numpy as np
import math
from visvis.utils.pypoints import Pointset, Point
#
from visvis.core import base
from visvis.core.misc import Range, getColor, basestring
from visvis.core.misc import Property, PropWithDraw, DrawAfter
#
from visvis.text import Text
from visvis.core.line import lineStyles, PolarLine
from visvis.core.cameras import depthToZ, TwoDCamera, FlyCamera
# A note about tick labels. We format these such that the width of the ticks
# never becomes larger than 10 characters (including the sign character).
# With a fontsize of 9, this needs a little less than 70 pixels. The
# correction applied when visualizing axis (and ticks) is 60, because
# the default offset is 10 pixels for the axes.
# See the docstring of GetTickTexts() for more info.
# create tick units
_tickUnits = []
for e in range(-10, 98):
for i in [10, 20, 25, 50]:
_tickUnits.append( i*10**e)
class AxisText(Text):
""" Text with a disabled Draw() method. """
def Draw(self):
pass
@Property
def x():
"""Get/Set the x position of the text."""
def fget(self):
return self._x
def fset(self, value):
self._x = value
return locals()
@Property
def y():
"""Get/Set the y position of the text."""
def fget(self):
return self._y
def fset(self, value):
self._y = value
return locals()
@Property
def z():
"""Get/Set the z position of the text."""
def fget(self):
return self._z
def fset(self, value):
self._z = value
return locals()
class AxisLabel(AxisText):
""" AxisLabel(parent, text)
A special label that moves itself just past the tickmarks.
The _textDict attribute should contain the Text objects of the tickmarks.
This is a helper class for the axis classes, and has a disabled Draw()
method.
"""
def __init__(self, *args, **kwargs):
Text.__init__(self, *args, **kwargs)
self._textDict = {}
self._move = 0
# upon creation, one typically needs a second draw; only after all
# ticks are drawn can this label be positioned properly.
def OnDrawScreen(self):
# get current position
pos = Point(self._screenx, self._screeny)
# get normal vector eminating from that position
if int(self.textAngle) == 90:
a = (self.textAngle + 90) * np.pi/180
self.valign = 1
distance = 8
else:
a = (self.textAngle - 90) * np.pi/180
self.valign = -1
distance = 3
normal = Point(np.cos(a), np.sin(a)).normalize()
# project the corner points of all text objects to the normal vector.
def project(p,normal):
p = p-pos
phi = abs(normal.angle(p))
return float( p.norm()*np.cos(phi) )
# apply
alpha = []
for text in self._textDict.values():
if text is self:
continue
if not text.isPositioned:
continue # Only consider drawn text objects
x,y = text._screenx, text._screeny
deltax, deltay = text.GetVertexLimits()
xmin, xmax = deltax
ymin, ymax = deltay
alpha.append( project(Point(x+xmin, y+ymin), normal) )
alpha.append( project(Point(x+xmin, y+ymax), normal) )
alpha.append( project(Point(x+xmax, y+ymin), normal) )
alpha.append( project(Point(x+xmax, y+ymax), normal) )
# establish the amount of pixels that we should move along the normal.
if alpha:
self._move = distance+max(alpha)
# move in the direction of the normal
tmp = pos + normal * self._move
self._screenx, self._screeny = int(tmp.x+0.5), int(tmp.y+0.5)
# draw and reset position
Text.OnDrawScreen(self)
self._screenx, self._screeny = pos.x, pos.y
def GetTickTexts(ticks):
""" GetTickTexts(ticks)
    Get tick labels of at most 9 characters (plus sign char).
    All ticks will be formatted in the same manner, and with the same number
    of decimals. In exponential notation, the exponent is written with as
    few characters as possible, leaving more chars for the decimals.
    The algorithm first determines, for each tick, the number of characters
    before the dot, the number of decimals, and the number of chars for
    the exponent. The ticks are then formatted without an exponent only if
    the sum of the first two counts (plus one for the dot) stays below 9.
Examples are:
xx.yyyyyy
xxxxxxx.y
x.yyyye+z
x.yye+zzz
"""
# For padding/unpadding exponent notation
def exp_pad(s, i=1):
return s.lstrip('0').rjust(i,'0')
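    # For illustration (these notes are not part of the original module):
    # exp_pad('07') -> '7', exp_pad('00') -> '0', and exp_pad('7', 3) -> '007';
    # i.e. it strips leading zeros from an exponent string and pads the result
    # back to at least i characters.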
# Round 1: determine amount of chars before dot, after dot, in exp
minChars1, maxChars1 = 99999, 0
maxChars2 = 0
maxChars3 = 0
for tick in ticks:
# Make abs, our goal is to format the ticks such that without
# the sign char, the string is smaller than 9 chars.
tick = abs(tick)
# Format with exponential notation and get exponent
t = '%1.0e' % tick
i = t.find('e')
expPart = t[i+2:]
# Get number of chars before dot
chars1 = int(expPart)+1
maxChars1 = max(maxChars1, chars1)
minChars1 = min(minChars1, chars1)
# Get number of chars in exponent
maxChars3 = max(maxChars3, len(exp_pad(expPart)))
# Get number of chars after the dot
t = '%1.7f' % tick
i = t.find('.')
decPart = t[i+1:]
maxChars2 = max(maxChars2, len(decPart.rstrip('0')))
# Round 2: Create actual texts
ticks2 = []
if maxChars1 + maxChars2 + 1 <= 9:
# This one is easy
chars2 = maxChars2
f = '%%1.%if' % chars2
for tick in ticks:
# Format tick and store
if tick == -0: tick = 0
ticks2.append( f % tick )
elif maxChars1 < 9:
# Do the best we can
chars2 = 9 - (maxChars1+1)
f = '%%1.%if' % chars2
for tick in ticks:
# Format tick and store
if tick == -0: tick = 0
ticks2.append( f % tick )
else:
# Exponential notation
chars2 = 9 - (4+maxChars3) # 0.xxxe+yy
f = '%%1.%ie' % chars2
for tick in ticks:
# Format tick
if tick == -0: tick = 0
t = f % tick
# Remove zeros in exp
i = t.find('e')
t = t[:i+2] + exp_pad(t[i+2:], maxChars3)
# Store
ticks2.append(t)
# Done
return ticks2
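# A minimal usage sketch of GetTickTexts (the calls below are illustrative and
# not part of the original module; outputs were worked out by hand and may
# differ slightly in edge cases):
#
#   >>> GetTickTexts([0.0, 0.5, 1.0])
#   ['0.0', '0.5', '1.0']
#   >>> GetTickTexts([1.23e12, 4.56e12])  # too wide, so exponential notation
#   ['1.230e+12', '4.560e+12']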
def GetTickText_deprecated(tick):
""" GetTickText(tick)
Obtain text from a tick. Convert to exponential notation
if necessary.
"""
# Correct -0: 0 has on some systems been reported to be shown as -0
if tick == -0:
tick = 0
# Get text
text = '%1.4g' % tick
iExp = text.find('e')
if iExp>0:
front = text[:iExp+2]
text = front + text[iExp+2:].lstrip('0')
return text
def GetTicks(p0, p1, lim, minTickDist=40, givenTicks=None):
""" GetTicks(p0, p1, lim, minTickDist=40, ticks=None)
Get the tick values, position and texts.
These are calculated from a start end end position and the range
of values to map on a straight line between these two points
(which can be 2d or 3d). If givenTicks is given, use these values instead.
"""
# Vector from start to end point
vec = p1-p0
# Init tick stuff
tickValues = []
tickTexts = []
tickPositions = []
if givenTicks is None:
# Calculate all ticks if not given
# Get pixels per unit
if lim.range == 0:
return [],[],[]
# Pixels per unit (use float64 to prevent inf for large numbers)
pixelsPerUnit = float( vec.norm() / lim.range )
# Try all tickunits, starting from the smallest, until we find
# one which results in a distance between ticks more than
# X pixels.
try:
for tickUnit in _tickUnits:
if tickUnit * pixelsPerUnit >= minTickDist:
break
# if the numbers are VERY VERY large (which is very unlikely)
# We use smaller-equal and a multiplication, so the error
# is also raised when pixelsPerUnit and minTickDist are inf.
# Thanks to Torquil Macdonald Sorensen for this bug report.
if tickUnit*pixelsPerUnit <= 0.99*minTickDist:
raise ValueError
except (ValueError, TypeError):
# too small
return [],[],[]
# Calculate the ticks (the values) themselves
firstTick = np.ceil( lim.min/tickUnit ) * tickUnit
lastTick = np.floor( lim.max/tickUnit ) * tickUnit
count = 0
tickValues.append(firstTick)
while tickValues[-1] < lastTick-tickUnit/2:
count += 1
t = firstTick + count*tickUnit
tickValues.append(t)
if count > 1000:
break # Safety
# Get tick texts
tickTexts = GetTickTexts(tickValues)
elif isinstance(givenTicks, dict):
# Use given ticks in dict
for tickValue in givenTicks:
if tickValue >= lim.min and tickValue <= lim.max:
tickText = givenTicks[tickValue]
tickValues.append(tickValue)
if isinstance(tickText, basestring):
tickTexts.append(tickText)
else:
tickTexts.append(str(tickText))
elif isinstance(givenTicks, (tuple,list)):
# Use given ticks as list
# Init temp tick texts list
tickTexts2 = []
for i in range(len(givenTicks)):
# Get tick
t = givenTicks[i]
if isinstance(t, basestring):
tickValue = i
tickText = t
else:
tickValue = float(t)
tickText = None
# Store
if tickValue >= lim.min and tickValue <= lim.max:
tickValues.append(tickValue)
tickTexts2.append(tickText)
# Get tick text that we normally would have used
tickTexts = GetTickTexts(tickValues)
# Replace with any given strings
for i in range(len(tickTexts)):
tmp = tickTexts2[i]
if tmp is not None:
tickTexts[i] = tmp
# Calculate tick positions
for t in tickValues:
pos = p0 + vec * ( (t-lim.min) / lim.range )
tickPositions.append( pos )
# Done
return tickValues, tickPositions, tickTexts
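# A minimal usage sketch of GetTicks (illustrative, not part of the original
# module): map the range 0..10 onto a 200 pixel horizontal line and let the
# algorithm pick the tick unit (2 here, given the default minTickDist of 40):
#
#   >>> p0, p1 = Point(0, 0, 0), Point(200, 0, 0)
#   >>> values, positions, texts = GetTicks(p0, p1, Range(0, 10))
#   >>> texts
#   ['0', '2', '4', '6', '8', '10']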
class BaseAxis(base.Wobject):
""" BaseAxis(parent)
This is the (abstract) base class for all axis classes, such
as the CartesianAxis and PolarAxis.
An Axis object represents the lines, ticks and grid that make
up an axis. Not to be confused with an Axes, which represents
a scene and is a Wibject.
"""
    # This documentation holds for the 3D axis; the 2D axis is a bit
    # simpler in some aspects.
    #
    # The scene is limited by the camera limits, thus forming a cube.
    # The axis is drawn on this cube.
    # The ASCII-art image below illustrates how the corners of this cube
    # are numbered.
    #
    # The ticks are drawn along three ridges of the cube. A reference
    # corner is selected first, which has a corresponding ridge vector.
    #
    # In orthogonal view, all ridges are parallel, but this is not the
# case in projective view. For each dimension there are 4 ridges to
# consider. Any grid lines are drawn between two ridges. The amount
# of ticks to draw (or minTickDist to be precise) should be determined
# based on the shortest ridge.
#
# 6 O---------------O 7
# /| /|
# / / |
# / | / |
# 3 O---------------O 5 |
# | | | |
# | 2 o- - - - - -|- -O 4
# | / | /
# | | /
# |/ |/
# 0 O---------------O 1
#
# / \ _
# | /|
# | z / x
# | / y ----->
#
def __init__(self, parent):
base.Wobject.__init__(self, parent)
# Make the axis the first wobject in the list. This somehow seems
# right and makes the Axes.axis property faster.
if hasattr(parent, '_wobjects') and self in parent._wobjects:
parent._wobjects.remove(self)
parent._wobjects.insert(0, self)
# Init property variables
self._showBox = True
self._axisColor = (0,0,0)
self._tickFontSize = 9
self._gridLineStyle = ':'
self._xgrid, self._ygrid, self._zgrid = False, False, False
        self._xminorgrid, self._yminorgrid, self._zminorgrid = False, False, False
        self._xticks, self._yticks, self._zticks = None, None, None
        self._xlabel, self._ylabel, self._zlabel = '', '', ''
# For the cartesian 2D axis, xticks can be rotated
self._xTicksAngle = 0
# Define parameters
self._lineWidth = 1 # 0.8
self._minTickDist = 40
# Corners of a cube in relative coordinates
self._corners = tmp = Pointset(3)
tmp.append(0,0,0); tmp.append(1,0,0); tmp.append(0,1,0);
tmp.append(0,0,1); tmp.append(1,1,0); tmp.append(1,0,1);
tmp.append(0,1,1); tmp.append(1,1,1);
# Indices of the base corners for each dimension.
# The order is very important, don't mess it up...
self._cornerIndicesPerDirection = [ [0,2,6,3], [3,5,1,0], [0,1,4,2] ]
# And the indices of the corresponding pair corners
self._cornerPairIndicesPerDirection = [ [1,4,7,5], [6,7,4,2], [3,5,7,6] ]
# Dicts to be able to optimally reuse text objects; creating new
# text objects or changing the text takes a relatively large amount
# of time (if done every draw).
self._textDicts = [{},{},{}]
## Properties
@PropWithDraw
def showBox():
""" Get/Set whether to show the box of the axis. """
def fget(self):
return self._showBox
def fset(self, value):
self._showBox = bool(value)
return locals()
@PropWithDraw
def axisColor():
""" Get/Set the color of the box, ticklines and tick marks. """
def fget(self):
return self._axisColor
def fset(self, value):
self._axisColor = getColor(value, 'setting axis color')
return locals()
@PropWithDraw
def tickFontSize():
""" Get/Set the font size of the tick marks. """
def fget(self):
return self._tickFontSize
def fset(self, value):
self._tickFontSize = value
return locals()
@PropWithDraw
def gridLineStyle():
""" Get/Set the style of the gridlines as a single char similar
to the lineStyle (ls) property of the line wobject (or in plot). """
def fget(self):
return self._gridLineStyle
def fset(self, value):
if value not in lineStyles:
raise ValueError("Invalid lineStyle for grid lines")
self._gridLineStyle = value
return locals()
@PropWithDraw
def showGridX():
""" Get/Set whether to show a grid for the x dimension. """
def fget(self):
return self._xgrid
def fset(self, value):
self._xgrid = bool(value)
return locals()
@PropWithDraw
def showGridY():
""" Get/Set whether to show a grid for the y dimension. """
def fget(self):
return self._ygrid
def fset(self, value):
self._ygrid = bool(value)
return locals()
@PropWithDraw
def showGridZ():
""" Get/Set whether to show a grid for the z dimension. """
def fget(self):
return self._zgrid
def fset(self, value):
self._zgrid = bool(value)
return locals()
@PropWithDraw
def showGrid():
""" Show/hide the grid for the x,y and z dimension. """
def fget(self):
return self._xgrid, self._ygrid, self._zgrid
def fset(self, value):
if isinstance(value, tuple):
value = tuple([bool(v) for v in value])
self._xgrid, self._ygrid, self._zgrid = value
else:
self._xgrid = self._ygrid = self._zgrid = bool(value)
return locals()
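    # Illustrative use (assuming an Axes instance 'a'; these lines are examples,
    # not part of the original module):
    #   a.axis.showGrid = True                    # grid for x, y and z at once
    #   a.axis.showGrid = (True, False, False)    # only the x grid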
@PropWithDraw
def showMinorGridX():
""" Get/Set whether to show a minor grid for the x dimension. """
def fget(self):
return self._xminorgrid
def fset(self, value):
self._xminorgrid = bool(value)
return locals()
@PropWithDraw
def showMinorGridY():
""" Get/Set whether to show a minor grid for the y dimension. """
def fget(self):
return self._yminorgrid
def fset(self, value):
self._yminorgrid = bool(value)
return locals()
@PropWithDraw
def showMinorGridZ():
""" Get/Set whether to show a minor grid for the z dimension. """
def fget(self):
return self._zminorgrid
def fset(self, value):
self._zminorgrid = bool(value)
return locals()
@PropWithDraw
def showMinorGrid():
""" Show/hide the minor grid for the x, y and z dimension. """
def fget(self):
return self._xminorgrid, self._yminorgrid, self._zminorgrid
def fset(self, value):
if isinstance(value, tuple):
tmp = tuple([bool(v) for v in value])
                self._xminorgrid, self._yminorgrid, self._zminorgrid = tmp
else:
tmp = bool(value)
self._xminorgrid = self._yminorgrid = self._zminorgrid = tmp
return locals()
@PropWithDraw
def xTicks():
""" Get/Set the ticks for the x dimension.
The value can be:
* None: the ticks are determined automatically.
* A tuple/list/numpy_array with float or string values: Floats
specify at which location tickmarks should be drawn. Strings are
drawn at integer positions corresponding to the index in the
given list.
* A dict with numbers or strings as values. The values are drawn at
the positions specified by the keys (which should be numbers).
"""
def fget(self):
return self._xticks
def fset(self, value):
m = 'Ticks must be a dict/list/tuple/numpy array of numbers or strings.'
if value is None:
self._xticks = None
elif isinstance(value, dict):
try:
ticks = {}
for key in value:
ticks[key] = str(value[key])
self._xticks = ticks
except Exception:
raise ValueError(m)
elif isinstance(value, (list, tuple, np.ndarray)):
try:
ticks = []
for val in value:
if isinstance(val, basestring):
ticks.append(val)
else:
ticks.append(float(val))
self._xticks = ticks
except Exception:
raise ValueError(m)
else:
raise ValueError(m)
return locals()
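    # Illustrative forms accepted by xTicks (and likewise yTicks/zTicks),
    # assuming an Axes instance 'a'; these lines are examples, not part of the
    # original module:
    #   a.axis.xTicks = [0, 1, 2, 3]              # tick positions
    #   a.axis.xTicks = ['jan', 'feb', 'mar']     # labels drawn at 0, 1, 2
    #   a.axis.xTicks = {0: 'zero', 3.14: 'pi'}   # position -> label mapping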
@PropWithDraw
def yTicks():
""" Get/Set the ticks for the y dimension.
The value can be:
* None: the ticks are determined automatically.
* A tuple/list/numpy_array with float or string values: Floats
specify at which location tickmarks should be drawn. Strings are
drawn at integer positions corresponding to the index in the
given list.
* A dict with numbers or strings as values. The values are drawn at
the positions specified by the keys (which should be numbers).
"""
def fget(self):
return self._yticks
def fset(self, value):
m = 'Ticks must be a dict/list/tuple/numpy array of numbers or strings.'
if value is None:
self._yticks = None
elif isinstance(value, dict):
try:
ticks = {}
for key in value:
ticks[key] = str(value[key])
self._yticks = ticks
except Exception:
raise ValueError(m)
elif isinstance(value, (list, tuple, np.ndarray)):
try:
ticks = []
for val in value:
if isinstance(val, basestring):
ticks.append(val)
else:
ticks.append(float(val))
self._yticks = ticks
except Exception:
raise ValueError(m)
else:
raise ValueError(m)
return locals()
@PropWithDraw
def zTicks():
""" Get/Set the ticks for the z dimension.
The value can be:
* None: the ticks are determined automatically.
* A tuple/list/numpy_array with float or string values: Floats
specify at which location tickmarks should be drawn. Strings are
drawn at integer positions corresponding to the index in the
given list.
* A dict with numbers or strings as values. The values are drawn at
the positions specified by the keys (which should be numbers).
"""
def fget(self):
return self._zticks
def fset(self, value):
m = 'Ticks must be a dict/list/tuple/numpy array of numbers or strings.'
if value is None:
self._zticks = None
elif isinstance(value, dict):
try:
ticks = {}
for key in value:
ticks[key] = str(value[key])
self._zticks = ticks
except Exception:
raise ValueError(m)
elif isinstance(value, (list, tuple, np.ndarray)):
try:
ticks = []
for val in value:
if isinstance(val, basestring):
ticks.append(val)
else:
ticks.append(float(val))
self._zticks = ticks
except Exception:
raise ValueError(m)
else:
raise ValueError(m)
return locals()
@PropWithDraw
def xLabel():
""" Get/Set the label for the x dimension.
"""
def fget(self):
return self._xlabel
def fset(self, value):
self._xlabel = value
return locals()
@PropWithDraw
def yLabel():
""" Get/Set the label for the y dimension.
"""
def fget(self):
return self._ylabel
def fset(self, value):
self._ylabel = value
return locals()
@PropWithDraw
def zLabel():
""" Get/Set the label for the z dimension.
"""
def fget(self):
return self._zlabel
def fset(self, value):
self._zlabel = value
return locals()
## Methods for drawing
def OnDraw(self, ppc_pps_ppg=None):
# Get axes and return if there is none,
# or if it doesn't want to show an axis.
axes = self.GetAxes()
if not axes:
return
# Calculate lines and labels (or get from argument)
if ppc_pps_ppg:
ppc, pps, ppg = ppc_pps_ppg
else:
try:
ppc, pps, ppg = self._CreateLinesAndLabels(axes)
except Exception:
self.Destroy() # So the error message does not repeat itself
raise
# Store lines to be drawn in screen coordinates
self._pps = pps
# Prepare for drawing lines
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
clr = self._axisColor
gl.glColor(clr[0], clr[1], clr[2])
gl.glLineWidth(self._lineWidth)
# Draw lines
if len(ppc):
gl.glVertexPointerf(ppc.data)
gl.glDrawArrays(gl.GL_LINES, 0, len(ppc))
# Draw gridlines
if len(ppg):
# Set stipple pattern
            if self.gridLineStyle not in lineStyles:
stipple = False
else:
stipple = lineStyles[self.gridLineStyle]
if stipple:
gl.glEnable(gl.GL_LINE_STIPPLE)
gl.glLineStipple(1, stipple)
# Draw using array
gl.glVertexPointerf(ppg.data)
gl.glDrawArrays(gl.GL_LINES, 0, len(ppg))
# Clean up
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisable(gl.GL_LINE_STIPPLE)
def OnDrawScreen(self):
# Actually draw the axis
axes = self.GetAxes()
if not axes:
return
# get pointset
if not hasattr(self, '_pps') or not self._pps:
return
pps = self._pps.copy()
pps[:,2] = depthToZ( pps[:,2] )
# Prepare for drawing lines
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glVertexPointerf(pps.data)
if isinstance(axes.camera, TwoDCamera):
gl.glDisable(gl.GL_LINE_SMOOTH)
# Draw lines
clr = self._axisColor
gl.glColor(clr[0], clr[1], clr[2])
gl.glLineWidth(self._lineWidth)
if len(pps):
gl.glDrawArrays(gl.GL_LINES, 0, len(pps))
# Clean up
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnable(gl.GL_LINE_SMOOTH)
## Help methods
def _DestroyChildren(self):
""" Method to clean up the children (text objects).
"""
if self._children:
for child in self.children:
child.Destroy()
def _CalculateCornerPositions(self, xlim, ylim, zlim):
""" Calculate the corner positions in world coorinates
and screen coordinates, given the limits for each dimension.
"""
# To translate to real coordinates
pmin = Point(xlim.min, ylim.min, zlim.min)
pmax = Point(xlim.max, ylim.max, zlim.max)
def relativeToCoord(p):
pi = Point(1,1,1) - p
return pmin*pi + pmax*p
# Get the 8 corners of the cube in real coords and screen pixels
# Note that in perspective mode the screen coords for points behind
# the near clipping plane are undefined. This results in odd values,
# which should be accounted for. This is mostly only a problem for
# the fly camera though.
proj = glu.gluProject
corners8_c = [relativeToCoord(p) for p in self._corners]
corners8_s = [Point(proj(p.x,p.y,p.z)) for p in corners8_c]
# Return
return corners8_c, corners8_s
def _GetTicks(self, tickUnit, lim):
""" Given tickUnit (the distance in world units between the ticks)
and the range to cover (lim), calculate the actual tick values.
"""
# Get position of first and last tick
firstTick = np.ceil( lim.min/tickUnit ) * tickUnit
lastTick = np.floor( lim.max/tickUnit ) * tickUnit
# Valid range?
if firstTick > lim.max or lastTick < lim.min:
return []
# Create ticks
count = 0
ticks = [firstTick]
while ticks[-1] < lastTick-tickUnit:
count += 1
# tmp = firstTick + count*tickUnit
# if abs(tmp/tickUnit) < 10**-10:
# tmp = 0 # due round-off err, 0 can otherwise be 0.5e-17 or so
# ticks.append(tmp)
ticks.append( firstTick + count*tickUnit )
return ticks
def _NextCornerIndex(self, i, d, vector_s):
""" Calculate the next corner index.
"""
if d<2 and vector_s.x >= 0:
i+=self._delta
elif d==2 and vector_s.y < 0:
i+=self._delta
else:
i-=self._delta
if i>3: i=0
if i<0: i=3
return i
def _CreateLinesAndLabels(self, axes):
""" This is the method that calculates where lines should be
drawn and where labels should be placed.
It returns three point sets in which the pairs of points
represent the lines to be drawn (using GL_LINES):
* ppc: lines in real coords
* pps: lines in screen pixels
* ppg: dotted lines in real coords
"""
        raise NotImplementedError('This is the abstract base class.')
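# Illustrative sketch (not part of the original visvis source): a minimal
# BaseAxis subclass that satisfies the _CreateLinesAndLabels contract.  Each
# consecutive pair of points appended to ppc/pps/ppg is rendered as one
# GL_LINES segment by OnDraw/OnDrawScreen above; this example simply draws
# nothing.
class _NoOpAxisSketch(BaseAxis):
    """ Example axis that draws no lines, ticks or grid. """
    def _CreateLinesAndLabels(self, axes):
        ppc = Pointset(3)  # line pieces in world coordinates
        pps = Pointset(3)  # line pieces in screen pixels
        ppg = Pointset(3)  # dotted grid-line pieces in world coordinates
        return ppc, pps, ppg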
class CartesianAxis2D(BaseAxis):
""" CartesianAxis2D(parent)
An Axis object represents the lines, ticks and grid that make
up an axis. Not to be confused with an Axes, which represents
a scene and is a Wibject.
The CartesianAxis2D is a straightforward axis, drawing straight
lines for cartesian coordinates in 2D.
"""
@PropWithDraw
def xTicksAngle():
""" Get/Set the angle of the tick marks for te x-dimension.
This can be used when the tick labels are long, to prevent
them from overlapping. Note that if this value is non-zero,
the horizontal alignment is changed to left (instead of center).
"""
def fget(self):
return self._xTicksAngle
def fset(self, value):
self._xTicksAngle = value
return locals()
def _CreateLinesAndLabels(self, axes):
""" This is the method that calculates where lines should be
drawn and where labels should be placed.
It returns three point sets in which the pairs of points
represent the lines to be drawn (using GL_LINES):
* ppc: lines in real coords
* pps: lines in screen pixels
* ppg: dotted lines in real coords
"""
# Get camera instance
cam = axes.camera
# Get parameters
drawGrid = [v for v in self.showGrid]
drawMinorGrid = [v for v in self.showMinorGrid]
ticksPerDim = [self.xTicks, self.yTicks]
# Get limits
lims = axes.GetLimits()
lims = [lims[0], lims[1], cam._zlim]
# Get labels
labels = [self.xLabel, self.yLabel]
# Init the new text object dictionaries
newTextDicts = [{},{},{}]
# Init pointsets for drawing lines and gridlines
ppc = Pointset(3) # lines in real coords
pps = Pointset(3) # lines in screen pixels
ppg = Pointset(3) # dotted lines in real coords
# Calculate cornerpositions of the cube
corners8_c, corners8_s = self._CalculateCornerPositions(*lims)
# We use this later to determine the order of the corners
self._delta = 1
for i in axes.daspect:
if i<0: self._delta*=-1
# For each dimension ...
for d in range(2): # d for dimension/direction
lim = lims[d]
# Get the four corners that are of interest for this dimension
# In 2D, the first two are the same as the last two
tmp = self._cornerIndicesPerDirection[d]
tmp = [tmp[i] for i in [0,1,0,1]]
corners4_c = [corners8_c[i] for i in tmp]
corners4_s = [corners8_s[i] for i in tmp]
# Get directional vectors in real coords and screen pixels.
# Easily calculated since the first _corner elements are
# 000,100,010,001
vector_c = corners8_c[d+1] - corners8_c[0]
vector_s = corners8_s[d+1] - corners8_s[0]
# Correct the tickdist for the x-axis if the numbers are large
minTickDist = self._minTickDist
if d==0:
mm = max(abs(lim.min),abs(lim.max))
if mm >= 10000:
minTickDist = 80
# Calculate tick distance in world units
minTickDist *= vector_c.norm() / vector_s.norm()
# Get index of corner to put ticks at
i0 = 0; bestVal = 999999999999999999999999
for i in range(2):
val = corners4_s[i].y
if val < bestVal:
i0 = i
bestVal = val
# Get indices of the two next corners on which
# ridges we may draw grid lines
i1 = self._NextCornerIndex(i0, d, vector_s)
i2 = self._NextCornerIndex(i1, d, vector_s)
# Get first corner and grid vectors
firstCorner = corners4_c[i0]
gv1 = corners4_c[i1] - corners4_c[i0]
gv2 = corners4_c[i2] - corners4_c[i1]
# Get tick vector to indicate tick
gv1s = corners4_s[i1] - corners4_s[i0]
#tv = gv1 * (5 / gv1s.norm() )
npixels = ( gv1s.x**2 + gv1s.y**2 ) ** 0.5 + 0.000001
tv = gv1 * (5.0 / npixels )
# Always draw these corners
pps.append(corners4_s[i0])
pps.append(corners4_s[i0]+vector_s)
# Add line pieces to draw box
if self._showBox:
for i in range(2):
if i != i0:
corner = corners4_s[i]
pps.append(corner)
pps.append(corner+vector_s)
# Get ticks stuff
tickValues = ticksPerDim[d] # can be None
p1, p2 = firstCorner.copy(), firstCorner+vector_c
tmp = GetTicks(p1,p2, lim, minTickDist, tickValues)
ticks, ticksPos, ticksText = tmp
tickUnit = lim.range
if len(ticks)>=2:
tickUnit = ticks[1] - ticks[0]
# Apply Ticks
for tick, pos, text in zip(ticks, ticksPos, ticksText):
# Get little tail to indicate tick
p1 = pos
p2 = pos - tv
# Add tick lines
factor = ( tick-firstCorner[d] ) / vector_c[d]
p1s = corners4_s[i0] + vector_s * factor
tmp = Point(0,0,0)
tmp[int(not d)] = 4
pps.append(p1s)
pps.append(p1s-tmp)
# Put a textlabel at tick
textDict = self._textDicts[d]
if tick in textDict and textDict[tick] in self._children:
t = textDict.pop(tick)
t.text = text
t.x, t.y, t.z = p2.x, p2.y, p2.z
else:
t = AxisText(self,text, p2.x,p2.y,p2.z)
# Add to dict
newTextDicts[d][tick] = t
# Set other properties right
t._visible = True
t.fontSize = self._tickFontSize
t._color = self._axisColor # Use private attr for performance
if d==1:
t.halign = 1
t.valign = 0
else:
t.textAngle = self._xTicksAngle
if self._xTicksAngle > 0:
t.halign = 1
elif self._xTicksAngle < 0:
t.halign = -1
else:
t.halign = 0
if abs(self._xTicksAngle) > 45:
t.valign = 0
else:
t.valign = -1
# We should hide this last tick if it sticks out
if d==0 and len(ticks):
# Get positions
fig = axes.GetFigure()
if fig:
tmp1 = fig.position.width
tmp2 = glu.gluProject(t.x, t.y, t.z)[0]
tmp2 += t.GetVertexLimits()[0][1] # Max of x
# Apply
if tmp1 < tmp2:
t._visible = False
# Get gridlines
if drawGrid[d] or drawMinorGrid[d]:
# Get more gridlines if required
if drawMinorGrid[d]:
ticks = self._GetTicks(tickUnit/5, lim)
# Get positions
for tick in ticks:
# Get tick location
p1 = firstCorner.copy()
p1[d] = tick
# Add gridlines
p3 = p1+gv1
#p4 = p3+gv2
ppg.append(p1); ppg.append(p3)
# Apply label
textDict = self._textDicts[d]
p1 = corners4_c[i0] + vector_c * 0.5
key = '_label_'
if key in textDict and textDict[key] in self._children:
t = textDict.pop(key)
t.text = labels[d]
t.x, t.y, t.z = p1.x, p1.y, p1.z
else:
#t = AxisText(self,labels[d], p1.x,p1.y,p1.z)
t = AxisLabel(self,labels[d], p1.x,p1.y,p1.z)
t.fontSize=10
newTextDicts[d][key] = t
t.halign = 0
t._color = self._axisColor
# Move label to back, so the repositioning works right
if not t in self._children[-3:]:
self._children.remove(t)
self._children.append(t)
# Get vec to calc angle
vec = Point(vector_s.x, vector_s.y)
if vec.x < 0:
vec = vec * -1
t.textAngle = float(vec.angle() * 180/np.pi)
# Keep up to date (so label can move itself just beyond ticks)
t._textDict = newTextDicts[d]
# Correct gridlines so they are all at z=0.
# The grid is always exactly at 0. Images are at -0.1 or less.
        # lines and points are at +0.1
ppg.data[:,2] = 0.0
# Clean up the text objects that are left
for tmp in self._textDicts:
for t in list(tmp.values()):
t.Destroy()
# Store text object dictionaries for next time ...
self._textDicts = newTextDicts
# Return
return ppc, pps, ppg
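# Illustrative usage sketch (not part of the original source) for the
# xTicksAngle property defined above.  It assumes a visvis Axes instance
# whose `axis` attribute is the CartesianAxis2D shown here.
def _example_rotate_x_tick_labels(axes, angle=45):
    """ Rotate the x tick labels so that long labels do not overlap. """
    axis = axes.axis
    if isinstance(axis, CartesianAxis2D):
        axis.xTicksAngle = angle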
class CartesianAxis3D(BaseAxis):
""" CartesianAxis3D(parent)
An Axis object represents the lines, ticks and grid that make
up an axis. Not to be confused with an Axes, which represents
a scene and is a Wibject.
The CartesianAxis3D is a straightforward axis, drawing straight
lines for cartesian coordinates in 3D.
"""
def _GetRidgeVector(self, d, corners8_c, corners8_s):
""" _GetRidgeVector(d, corners8_c, corners8_s)
Get the four vectors for the four ridges coming from the
corners that correspond to the given direction.
Also returns the lengths of the smallest vectors, for the
calculation of the minimum tick distance.
"""
# Get the vectors
vectors_c = []
vectors_s = []
for i in range(4):
i1 = self._cornerIndicesPerDirection[d][i]
i2 = self._cornerPairIndicesPerDirection[d][i]
vectors_c.append( corners8_c[i2] - corners8_c[i1])
vectors_s.append( corners8_s[i2] - corners8_s[i1])
# Select the smallest vector (in screen coords)
smallest_i, smallest_L = 0, 9999999999999999999999999.0
for i in range(4):
L = vectors_s[i].x**2 + vectors_s[i].y**2
if L < smallest_L:
smallest_i = i
smallest_L = L
# Return smallest and the vectors
norm_c = vectors_c[smallest_i].norm()
norm_s = smallest_L**0.5
return norm_c, norm_s, vectors_c, vectors_s
def _CreateLinesAndLabels(self, axes):
""" This is the method that calculates where lines should be
drawn and where labels should be placed.
It returns three point sets in which the pairs of points
represent the lines to be drawn (using GL_LINES):
* ppc: lines in real coords
* pps: lines in screen pixels
* ppg: dotted lines in real coords
"""
# Get camera instance
cam = axes.camera
# Get parameters
drawGrid = [v for v in self.showGrid]
drawMinorGrid = [v for v in self.showMinorGrid]
ticksPerDim = [self.xTicks, self.yTicks, self.zTicks]
# Get limits
lims = [cam._xlim, cam._ylim, cam._zlim]
# Get labels
labels = [self.xLabel, self.yLabel, self.zLabel]
# Init the new text object dictionaries
newTextDicts = [{},{},{}]
# Init pointsets for drawing lines and gridlines
ppc = Pointset(3) # lines in real coords
pps = Pointset(3) # lines in screen pixels
ppg = Pointset(3) # dotted lines in real coords
# Calculate cornerpositions of the cube
corners8_c, corners8_s = self._CalculateCornerPositions(*lims)
# we use this later to determine the order of the corners
self._delta = 1
for i in axes.daspect:
if i<0: self._delta*=-1
# For each dimension ...
for d in range(3): # d for dimension/direction
lim = lims[d]
# Get the four corners that are of interest for this dimension
# They represent one of the faces that we might draw in.
tmp = self._cornerIndicesPerDirection[d]
corners4_c = [corners8_c[i] for i in tmp]
corners4_s = [corners8_s[i] for i in tmp]
# Get directional vectors (i.e. ridges) corresponding to
# (emanating from) the four corners. Also returns the length
# of the shortest ridges (in screen coords)
_vectors = self._GetRidgeVector(d, corners8_c, corners8_s)
norm_c, norm_s, vectors4_c, vectors4_s = _vectors
            # Due to coords not being defined behind the near clip plane,
            # the vectors4_s might be inaccurate. This means the size and
# angle of the tickmarks may be calculated wrong. It also
# means the norm_s might be wrong. Since this is mostly a problem
# for the fly camera, we use a fixed norm_s in that case. This
# also prevents grid line flicker due to the constant motion
# of the camera.
if isinstance(axes.camera, FlyCamera):
norm_s = axes.position.width
# Calculate tick distance in units (using shortest ridge vector)
minTickDist = self._minTickDist
if norm_s > 0:
minTickDist *= norm_c / norm_s
# Get index of corner to put ticks at.
            # This is determined by choosing the corner which is the lowest
# on screen (for x and y), or the most to the left (for z).
i0 = 0; bestVal = 999999999999999999999999
for i in range(4):
                if d==2: val = corners4_s[i].x # choose leftmost corner
                else: val = corners4_s[i].y # choose bottommost corner
if val < bestVal:
i0 = i
bestVal = val
# Get indices of next corners corresponding to the ridges
# between which we may draw grid lines
# i0, i1, i2 are all in [0,1,2,3]
i1 = self._NextCornerIndex(i0, d, vectors4_s[i0])
i2 = self._NextCornerIndex(i1, d, vectors4_s[i0])
# Get first corner and grid vectors
firstCorner = corners4_c[i0]
gv1 = corners4_c[i1] - corners4_c[i0]
gv2 = corners4_c[i2] - corners4_c[i1]
# Get tick vector to indicate tick
gv1s = corners4_s[i1] - corners4_s[i0]
#tv = gv1 * (5 / gv1s.norm() )
npixels = ( gv1s.x**2 + gv1s.y**2 ) ** 0.5 + 0.000001
tv = gv1 * (5.0 / npixels )
# Draw edge lines (optionally to create a full box)
for i in range(4):
if self._showBox or i in [i0, i1, i2]:
#if self._showBox or i ==i0: # for a real minimalistic axis
# Note that we use world coordinates, rather than screen
# as the 2D axis does.
ppc.append(corners4_c[i])
j = self._cornerPairIndicesPerDirection[d][i]
ppc.append(corners8_c[j])
# Get ticks stuff
tickValues = ticksPerDim[d] # can be None
p1, p2 = firstCorner.copy(), firstCorner+vectors4_c[i0]
tmp = GetTicks(p1,p2, lim, minTickDist, tickValues)
ticks, ticksPos, ticksText = tmp
tickUnit = lim.range
if len(ticks)>=2:
tickUnit = ticks[1] - ticks[0]
# Apply Ticks
for tick, pos, text in zip(ticks, ticksPos, ticksText):
# Get little tail to indicate tick
p1 = pos
p2 = pos - tv
# Add tick lines
ppc.append(p1)
ppc.append(p2)
# z-axis has valign=0, thus needs extra space
if d==2:
text+=' '
# Put textlabel at tick
textDict = self._textDicts[d]
if tick in textDict and textDict[tick] in self._children:
t = textDict.pop(tick)
t.x, t.y, t.z = p2.x, p2.y, p2.z
else:
t = AxisText(self,text, p2.x,p2.y,p2.z)
# Add to dict
newTextDicts[d][tick] = t
# Set other properties right
t._visible = True
if t.fontSize != self._tickFontSize:
t.fontSize = self._tickFontSize
t._color = self._axisColor # Use private attr for performance
if d==2:
t.valign = 0
t.halign = 1
else:
if vectors4_s[i0].y*vectors4_s[i0].x >= 0:
t.halign = -1
t.valign = -1
else:
t.halign = 1
t.valign = -1
# Get gridlines
draw4 = self._showBox and isinstance(axes.camera, FlyCamera)
if drawGrid[d] or drawMinorGrid[d]:
# get more gridlines if required
if drawMinorGrid[d]:
ticks = self._GetTicks(tickUnit/5, lim)
# get positions
for tick in ticks:
# get tick location
p1 = firstCorner.copy()
p1[d] = tick
if tick not in [lim.min, lim.max]: # not ON the box
# add gridlines (back and front)
if True:
p3 = p1+gv1
p4 = p3+gv2
ppg.append(p1); ppg.append(p3)
ppg.append(p3); ppg.append(p4)
if draw4:
p5 = p1+gv2
p6 = p5+gv1
ppg.append(p1); ppg.append(p5)
ppg.append(p5); ppg.append(p6)
# Apply label
textDict = self._textDicts[d]
p1 = corners4_c[i0] + vectors4_c[i0] * 0.5
key = '_label_'
if key in textDict and textDict[key] in self._children:
t = textDict.pop(key)
t.text = labels[d]
t.x, t.y, t.z = p1.x, p1.y, p1.z
else:
#t = AxisText(self,labels[d], p1.x,p1.y,p1.z)
t = AxisLabel(self,labels[d], p1.x,p1.y,p1.z)
t.fontSize=10
newTextDicts[d][key] = t
t.halign = 0
t._color = self._axisColor # Use private attr for performance
# Move to back such that they can position themselves right
if not t in self._children[-3:]:
self._children.remove(t)
self._children.append(t)
# Get vec to calc angle
vec = Point(vectors4_s[i0].x, vectors4_s[i0].y)
if vec.x < 0:
vec = vec * -1
t.textAngle = float(vec.angle() * 180/np.pi)
# Keep up to date (so label can move itself just beyond ticks)
t._textDict = newTextDicts[d]
# Clean up the text objects that are left
for tmp in self._textDicts:
for t in list(tmp.values()):
t.Destroy()
# Store text object dictionaries for next time ...
self._textDicts = newTextDicts
# Return
return ppc, pps, ppg
class CartesianAxis(CartesianAxis2D, CartesianAxis3D):
""" CartesianAxis(parent)
An Axis object represents the lines, ticks and grid that make
up an axis. Not to be confused with an Axes, which represents
a scene and is a Wibject.
The CartesianAxis combines the 2D and 3D axis versions; it uses
the 2D version when the 2d camera is used, and the 3D axis
otherwise.
"""
# A bit ugly inheritance going on here, but otherwise the code below
# would not work ...
def _CreateLinesAndLabels(self, axes):
""" Choose depending on what camera is used. """
if isinstance(axes.camera, TwoDCamera):
return CartesianAxis2D._CreateLinesAndLabels(self,axes)
else:
return CartesianAxis3D._CreateLinesAndLabels(self,axes)
def GetPolarTicks(p0, radius, lim, angularRefPos, sense, minTickDist=100,
                  ticks=None):
    """ GetPolarTicks(p0, radius, lim, angularRefPos, sense, minTickDist=100,
    ticks=None)
Get the tick values, position and texts.
These are calculated from the polar center, radius and the range
of values to map on a straight line between these two points
(which can be 2d or 3d). If ticks is given, use these values instead.
"""
pTickUnits = [1,2,3,5,6,9,18,30,45] # 90 = 3*3*2*5*1
#circumference of circle
circum = 2*np.pi*radius
# Calculate all ticks if not given
if ticks is None:
# Get pixels per unit
if lim.range == 0:
return [],[],[]
pixelsPerUnit = circum / 360 #lim.range
# Try all tickunits, starting from the smallest, until we find
# one which results in a distance between ticks more than
# X pixels.
try:
for tickUnit in pTickUnits :
if tickUnit * pixelsPerUnit >= minTickDist:
break
# if the numbers are VERY VERY large (which is very unlikely)
if tickUnit*pixelsPerUnit < minTickDist:
raise ValueError
except (ValueError, TypeError):
# too small
return [],[],[]
# Calculate the ticks (the values) themselves
ticks = []
firstTick = np.ceil( lim.min/tickUnit ) * tickUnit
lastTick = np.floor( lim.max/tickUnit ) * tickUnit
count = 0
ticks = [firstTick]
while ticks[-1] < lastTick-tickUnit/2:
count += 1
ticks.append( firstTick + count*tickUnit )
# Calculate tick positions and text
ticksPos, ticksText = [], []
for tick in ticks:
theta = angularRefPos + sense*tick*np.pi/180.0
x = radius*np.cos(theta)
y = radius*np.sin(theta)
pos = p0 + Point(x,y,0)
if tick == -0:
tick = 0
text = '%1.4g' % tick
iExp = text.find('e')
if iExp>0:
front = text[:iExp+2]
text = front + text[iExp+2:].lstrip('0')
# Store
ticksPos.append( pos )
ticksText.append( text )
# Done
return ticks, ticksPos, ticksText
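# Illustrative sketch (not part of the original source): calling GetPolarTicks
# directly.  The radius and minTickDist values are chosen so that ticks land
# every 30 degrees; Point and Range are the classes used throughout this file.
def _example_polar_tick_values():
    center = Point(0, 0, 0)
    lim = Range(-179, 180)  # full circle, as used by PolarAxis2D below
    ticks, positions, texts = GetPolarTicks(center, 100.0, lim, 0.0, 1.0,
                                            minTickDist=50)
    return list(zip(ticks, texts))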
class PolarAxis2D(BaseAxis):
""" PolarAxis2D(parent)
An Axis object represents the lines, ticks and grid that make
up an axis. Not to be confused with an Axes, which represents
a scene and is a Wibject.
PolarAxis2D draws a polar grid, and modifies PolarLine objects
to properly plot onto the polar grid. PolarAxis2D has some
    specialized methods unique to it for adjusting the polar plot.
These include:
SetLimits(thetaRange, radialRange):
thetaRange, radialRange = GetLimits():
angularRefPos: Get and Set methods for the relative screen
angle of the 0 degree polar reference. Default is 0 degs
which corresponds to the positive x-axis (y =0)
isCW: Get and Set methods for the sense of rotation CCW or
        CW. This method takes/returns a bool (True means CW, which is the default).
Drag mouse up/down to translate radial axis
Drag mouse left/right to rotate angular ref position
Drag mouse + shift key up/down to rescale radial axis (min R fixed)
"""
def __init__(self, parent):
BaseAxis.__init__(self, parent)
self.ppb = None
axes = self.GetAxes()
axes.daspectAuto = False
self.bgcolor = axes.bgcolor
axes.bgcolor = None # disables the default background
        # Size of the border where circular tick labels are drawn
self.labelPix = 5
self._radialRange = Range(-1, 1) # default
self._angularRange = Range(-179, 180) # always 360 deg
self._angularRefPos = 0
self._sense = 1.0
        # Need to override this because the PolarAxis has
# four sets of radial ticks (with same dict key!)
self._textDicts = [{}, {}, {}, {}, {}]
# reference stuff for interaction
self.ref_loc = 0, 0, 0 # view_loc when clicked
self.ref_mloc = 0, 0 # mouse location when clicked
self.ref_but = 0 # mouse button when clicked
self.controlIsDown = False
self.shiftIsDown = False
# bind special event for translating lower radial limit
axes.eventKeyDown.Bind(self.OnKeyDown)
axes.eventKeyUp.Bind(self.OnKeyUp)
# Mouse events
axes.eventMouseDown.Bind(self.OnMouseDown)
axes.eventMouseUp.Bind(self.OnMouseUp)
axes.eventMotion.Bind(self.OnMotion)
@DrawAfter
def RescalePolarData(self):
""" RescalePolarData()
This method finds and transforms all polar line data
by the current polar radial axis limits so that data below
the center of the polar plot is set to 0,0,0 and data beyond
        the maximum (outer radius) is clipped.
"""
axes = self.GetAxes()
drawObjs = axes.FindObjects(PolarLine)
# Now set the transform for the PolarLine data
for anObj in drawObjs:
anObj.TransformPolar(self._radialRange, \
self._angularRefPos, self._sense)
def _CreateLinesAndLabels(self, axes):
""" This is the method that calculates where polar axis lines
should be drawn and where labels should be placed.
It returns three point sets in which the pairs of points
represent the lines to be drawn (using GL_LINES):
* ppc: lines in real coords
* pps: lines in screen pixels
* ppg: dotted lines in real coords
"""
# Get camera
# This camera has key bindings which are used to
# rescale the lower radial limits. Thus for polar plots the
# user can slide the radial range up
# and down and rotate the plot
cam = axes.camera
# Get axis grid and tick parameters
drawGrid = [v for v in self.showGrid]
drawMinorGrid = [v for v in self.showMinorGrid]
# these are equivalent to axes.thetaTicks and axes.RadialTicks
ticksPerDim = [self.xTicks, self.yTicks]
# Get x-y limits in world coordinates
lims = axes.GetLimits()
lims = [lims[0], lims[1], cam._zlim]
# From current lims calculate the radial axis min and max
# Get labels. These are equivalent to Theta and radial labels
labels = [self.xLabel, self.yLabel]
# Init the new text object dictionaries
# (theta, R(0),R(90),R(180),R(270))
newTextDicts = [{}, {}, {}, {}, {}]
# Init pointsets for drawing lines and gridlines
ppc = Pointset(3) # lines in real coords
pps = Pointset(3) # lines in screen pixels, not used by PolarAxis
ppg = Pointset(3) # dotted lines in real coords (for grids)
# circular background poly for polar ( rectangular bkgd is
# turned off and a circular one drawn instead )
self.ppb = Pointset(3)
        # outer circle at max radius
self.ppr = Pointset(3)
# Calculate corner positions of the x-y-z world and screen cube
        # Note: It's not clear why you would want, or what the meaning
# of x-y-z screen coordinates is (corners8_s) since the
# screen is only 2D
corners8_c, corners8_s = self._CalculateCornerPositions(*lims)
# We use this later to determine the order of the corners
self._delta = 1
for i in axes.daspect:
if i < 0:
self._delta *= -1
# Since in polar coordinates screen and data x and y values
# need to be mapped to theta and R
# PolarAxis calculates things differently from Cartesian2D.
# Also, polar coordinates need to be
# fixed to world coordinates, not screen coordinates
vector_cx = corners8_c[1] - corners8_c[0]
vector_sx = corners8_s[1] - corners8_s[0]
vector_cy = corners8_c[2] - corners8_c[0]
vector_sy = corners8_s[2] - corners8_s[0]
# The screen window may be any rectangular shape and
# for PolarAxis, axes.daspectAuto = False so
        # that circles always look like circles
# (x & y are always scaled together).
# The first step is to find the radial extent of the PolarAxis.
# For the axis to fit this will simply be the smallest window size in
# x or y. We also need to reduce it further so
# that tick labels can be drawn
if vector_cx.norm() < vector_cy.norm():
dimMax_c = (vector_cx.norm() / 2)
dimMax_s = (vector_sx.norm() / 2)
else:
dimMax_c = (vector_cy.norm() / 2)
dimMax_s = (vector_sy.norm() / 2)
pix2c = dimMax_c / dimMax_s # for screen to world conversion
txtSize = self.labelPix * pix2c
radiusMax_c = dimMax_c - 3.0 * txtSize # Max radial scale extent
center_c = Point(0.0, 0.0, 0.0)
#self._radialRange = radiusMax_c
radiusMax_c = self._radialRange.range
#==========================================================
# Apply labels
#==========================================================
for d in range(2):
# Get the four corners that are of interest for this dimension
# In 2D, the first two are the same as the last two
tmp = self._cornerIndicesPerDirection[d]
tmp = [tmp[i] for i in [0, 1, 0, 1]]
corners4_c = [corners8_c[i] for i in tmp]
corners4_s = [corners8_s[i] for i in tmp]
# Get index of corner to put ticks at
i0 = 0
bestVal = 999999999999999999999999
for i in range(4):
val = corners4_s[i].y
if val < bestVal:
i0 = i
bestVal = val
# Get directional vectors in real coords and screen pixels.
# Easily calculated since the first _corner elements are
# 000,100,010,001
vector_c = corners8_c[d + 1] - corners8_c[0]
vector_s = corners8_s[d + 1] - corners8_s[0]
textDict = self._textDicts[d]
p1 = corners4_c[i0] + vector_c * 0.5
key = '_label_'
if key in textDict and textDict[key] in self._children:
t = textDict.pop(key)
t.text = labels[d]
t.x, t.y, t.z = p1.x, p1.y, p1.z
else:
#t = AxisText(self,labels[d], p1.x,p1.y,p1.z)
t = AxisLabel(self, labels[d], p1.x, p1.y, p1.z)
t.fontSize = 10
newTextDicts[d][key] = t
t.halign = 0
t._color = self._axisColor # Use private attr for performance
# Move to back
if not t in self._children[-3:]:
self._children.remove(t)
self._children.append(t)
# Get vec to calc angle
vec = Point(vector_s.x, vector_s.y)
if vec.x < 0:
vec = vec * -1
# This was causing weird behaviour, so I commented it out
# t.textAngle = float(vec.angle() * 180/np.pi)
# Keep up to date (so label can move itself just beyond ticks)
t._textDict = newTextDicts[d]
# To make things easier to program I just pulled out
        # the Polar angular and radial calculations since they
        # are dissimilar anyway (i.e. a 'for range(2)' doesn't really help here)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Angular Axis lines, tick and circular background calculations
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# theta axis is circle at the outer radius
# with a line segment every 6 degrees to form circle
theta = self._angularRefPos + \
self._sense * np.linspace(0, 2 * np.pi, 61)
# x,y for background
xb = radiusMax_c * np.cos(theta)
yb = radiusMax_c * np.sin(theta)
#x,y for maximum scale radius
xc = radiusMax_c * np.cos(theta)
yc = radiusMax_c * np.sin(theta)
# ppb is the largest circle that will fit
# and is used to draw the polar background poly
for x, y in np.column_stack((xb, yb)):
self.ppb.append(x, y, -10.0)
for x, y in np.column_stack((xc, yc)):
self.ppr.append(x, y, -1.0)
# polar ticks
# Correct the tickdist for the x-axis if the numbers are large
minTickDist = self._minTickDist
minTickDist = 40 # This should be set by the font size
# Calculate tick distance in world units
minTickDist *= pix2c
tickValues = ticksPerDim[0] # can be None
tmp = GetPolarTicks(center_c, radiusMax_c, self._angularRange, \
self._angularRefPos, self._sense, \
minTickDist, tickValues)
ticks, ticksPos, ticksText = tmp
textRadius = (2.2 * txtSize) + radiusMax_c
# Get tick unit
tickUnit = self._angularRange.range
if len(ticks)>=2:
tickUnit = ticks[1] - ticks[0]
for tick, pos, text in zip(ticks, ticksPos, ticksText):
            # Get little tail to indicate tick (hard-coded to 5% of the max radius)
p1 = pos
tv = 0.05 * radiusMax_c * p1 / p1.norm()
# polar ticks are inline with vector to tick position
p2s = pos - tv
# Add tick lines
ppc.append(pos)
ppc.append(p2s)
            # Text is in world coordinates so need to create them based on ticks
theta = self._angularRefPos + (self._sense * tick * np.pi / 180.0)
p2 = Point((textRadius * np.cos(theta))[0], \
(textRadius * np.sin(theta))[0], 0)
# Put a textlabel at tick
textDict = self._textDicts[0]
if tick in textDict and textDict[tick] in self._children:
t = textDict.pop(tick)
t.x, t.y, t.z = p2.x, p2.y, p2.z
else:
t = AxisText(self, text, p2.x, p2.y, p2.z)
# Add to dict
newTextDicts[0][tick] = t
# Set other properties right
t._visible = True
if t.fontSize != self._tickFontSize:
t.fontSize = self._tickFontSize
t._color = self._axisColor # Use private attr for performance
t.halign = 0
t.valign = 0
#===================================================================
# Get gridlines
if drawGrid[0] or drawMinorGrid[0]:
# Get more gridlines if required
if drawMinorGrid[0]:
                ticks = self._GetTicks(tickUnit / 5, self._angularRange)
# Get positions
for tick, p in zip(ticks, ticksPos):
ppg.append(center_c)
ppg.append(p)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# radial Axis lines, tick calculations
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# the radial axis is vertical and horizontal lines through the center
# radial lines every 90 deg
theta = self._angularRefPos + \
self._sense * np.arange(0, 2 * np.pi, np.pi / 2)
xc = radiusMax_c * np.cos(theta)
yc = radiusMax_c * np.sin(theta)
for x, y in np.column_stack((xc, yc)):
ppc.append(0.0, 0.0, 0.0)
ppc.append(x, y, 0.0)
# radial ticks
# Correct the tickdist for the x-axis if the numbers are large
minTickDist = self._minTickDist
# Calculate tick distance in world units
minTickDist *= pix2c
tickValues = ticksPerDim[1] # can be None
ticks, ticksPos, ticksText, quadIndex = [], [], [], []
for index, theta in enumerate(self._angularRefPos + \
self._sense * np.array([0, np.pi / 2, np.pi, np.pi * 3 / 2])):
xc = radiusMax_c * np.cos(theta)
yc = radiusMax_c * np.sin(theta)
p2 = Point(xc, yc, 0)
tmp = GetTicks(center_c, p2, Range(0, radiusMax_c), \
minTickDist, tickValues)
if index == 0:
ticks = ticks + tmp[0]
ticksPos = ticksPos + tmp[1]
quadIndex = quadIndex + [index + 1] * len(tmp[0])
else:
ticks = ticks + tmp[0][1:]
ticksPos = ticksPos + tmp[1][1:]
quadIndex = quadIndex + [index + 1] * len(tmp[1][1:])
for tick, pos, qIndx in zip(ticks, ticksPos, quadIndex):
# Get little tail to indicate tick
tickXformed = tick + self._radialRange.min
text = '%1.4g' % (tickXformed)
iExp = text.find('e')
if iExp > 0:
front = text[:iExp + 2]
text = front + text[iExp + 2:].lstrip('0')
p1 = pos
if (p1.norm() != 0):
tv = (4 * pix2c[0]) * p1 / p1.norm()
tvTxt = ((4 * pix2c[0]) + \
txtSize[0].view(float)) * p1 / p1.norm()
else:
tv = Point(0, 0, 0)
tvTxt = Point(-txtSize[0], 0, 0)
# radial ticks are orthogonal to tick position
tv = Point(tv.y, tv.x, 0)
tvTxt = Point(tvTxt.y, tvTxt.x, 0)
ptic = pos - tv
ptxt = pos - tvTxt
# Add tick lines
ppc = ppc + pos
ppc = ppc + ptic
textDict = self._textDicts[qIndx]
if tickXformed in textDict and \
textDict[tickXformed] in self._children:
t = textDict.pop(tickXformed)
t.x, t.y, t.z = ptxt.x, ptxt.y, ptxt.z
else:
t = AxisText(self, text, ptxt.x, ptxt.y, ptxt.z)
# Add to dict
#print(tick, '=>',text, 'but', t.text)
newTextDicts[qIndx][tickXformed] = t
# Set other properties right
t._visible = True
if t.fontSize != self._tickFontSize:
t.fontSize = self._tickFontSize
t._color = self._axisColor # Use private attr for performance
t.halign = 1
t.valign = 0
#====================================================================
# Get gridlines
if drawGrid[1] or drawMinorGrid[1]:
# Get more gridlines if required
# line segment every 6 degrees to form circle
theta = self._angularRefPos + \
self._sense * np.linspace(0, 2 * np.pi, 61)
if drawMinorGrid[1]:
ticks = self._GetTicks(tickUnit / 5, self._angularRange)
# Get positions
for tick in ticks:
xc = tick * np.cos(theta)
yc = tick * np.sin(theta)
xlast = xc[:-1][0]
ylast = yc[:-1][0]
for x, y in np.column_stack((xc, yc)):
ppg.append(Point(xlast, ylast, 0.0))
ppg.append(Point(x, y, 0.0))
xlast = x
ylast = y
# Clean up the text objects that are left
for tmp in self._textDicts:
for t in list(tmp.values()):
t.Destroy()
# Store text object dictionaries for next time ...
self._textDicts = newTextDicts
# Return points (note: Special PolarAxis points are set as class
        # variables since this method was overridden)
return ppc, pps, ppg
def OnDraw(self):
# Get axes
axes = self.GetAxes()
if not axes:
return
# Calculate lines and labels
try:
ppc, pps, ppg = self._CreateLinesAndLabels(axes)
except Exception:
self.Destroy() # So the error message does not repeat itself
raise
# Draw background and lines
if self.ppb and self.ppr:
# Set view params
s = axes.camera.GetViewParams()
            if s['loc'][0] != 0 or s['loc'][1] != 0:
axes.camera.SetViewParams(loc=(0,0,0))
# Prepare data for polar coordinates
self.RescalePolarData()
# Prepare for drawing lines and background
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisable(gl.GL_DEPTH_TEST)
# Draw polygon background
clr = 1, 1, 1
gl.glColor3f(clr[0], clr[1], clr[2])
gl.glVertexPointerf(self.ppb.data)
gl.glDrawArrays(gl.GL_POLYGON, 0, len(self.ppb))
# Draw lines
clr = self._axisColor
gl.glColor(clr[0], clr[1], clr[2])
gl.glLineWidth(self._lineWidth)
gl.glVertexPointerf(self.ppr.data)
gl.glDrawArrays(gl.GL_LINE_LOOP, 0, len(self.ppr))
# Clean up
gl.glFlush()
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
# Draw axes lines and text etc.
BaseAxis.OnDraw(self, (ppc, pps, ppg))
def OnKeyDown(self, event):
if event.key == 17 and self.ref_but == 1:
self.shiftIsDown = True
elif event.key == 19 and self.ref_but == 0:
self.controlIsDown = True
return True
def OnKeyUp(self, event):
self.shiftIsDown = False
self.controlIsDown = False
self.ref_but = 0 # in case the mouse was also down
return True
def OnMouseDown(self, event):
# store mouse position and button
self.ref_mloc = event.x, event.y
self.ref_but = event.button
self.ref_lowerRadius = self._radialRange.min
self.ref_angularRefPos = self.angularRefPos
def OnMouseUp(self, event):
self.ref_but = 0
self.Draw()
def OnMotion(self, event):
if not self.ref_but:
return
axes = event.owner
mloc = axes.mousepos
Rrange = self._radialRange.range
if self.ref_but == 1:
# get distance and convert to world coordinates
refloc = axes.camera.ScreenToWorld(self.ref_mloc)
loc = axes.camera.ScreenToWorld(mloc)
# calculate radial and circular ref position translations
dx = loc[0] - refloc[0]
dy = loc[1] - refloc[1]
if self.shiftIsDown:
minRadius = self.ref_lowerRadius - dy
self.SetLimits(rangeR=Range(minRadius, \
minRadius + Rrange))
else:
self.angularRefPos = self.ref_angularRefPos - \
(50 * dx / Rrange)
elif self.ref_but == 2:
# zoom
# Don't care about x zooming for polar plot
# get movement in x (in pixels) and normalize
#factor_x = float(self.ref_mloc[0] - mloc[0])
#factor_x /= axes.position.width
# get movement in y (in pixels) and normalize
factor_y = float(self.ref_mloc[1] - mloc[1])
# normalize by axes height
factor_y /= axes.position.height
# apply (use only y-factor ).
Rrange = Rrange * math.exp(-factor_y)
self.SetLimits(rangeR=Range(self._radialRange.min, \
self._radialRange.min + Rrange))
self.ref_mloc = mloc
self.Draw()
return True
@DrawAfter
def SetLimits(self, rangeTheta=None, rangeR=None, margin=0.04):
""" SetLimits(rangeTheta=None, rangeR=None, margin=0.02)
Set the Polar limits of the scene. These are taken as hints to set
the camera view, and determine where the axis is drawn for the
3D camera.
Either range can be None, rangeTheta can be a scalar since only the
starting position is used. RangeTheta is always 360 degrees
        Both rangeTheta and rangeR can be a 2-element iterable, or a
visvis.Range object. If a range is None, the range is obtained from
the wobjects currently in the scene. To set the range that will fit
all wobjects, simply use "SetLimits()"
        The margin represents the fraction of the range to add (default 4%).
"""
if rangeTheta is None or isinstance(rangeTheta, Range):
pass # ok
elif hasattr(rangeTheta, '__len__') and len(rangeTheta) >= 1:
rangeTheta = Range(rangeTheta[0], rangeTheta[0] + 359)
else:
rangeTheta = Range(float(rangeTheta), float(rangeTheta) + 359)
if rangeR is None or isinstance(rangeR, Range):
pass # ok
elif hasattr(rangeR, '__len__') and len(rangeR) == 2:
rangeR = Range(rangeR[0], rangeR[1])
else:
raise ValueError("radial limits should be Range \
or two-element iterables.")
if rangeTheta != None:
self._angularRange = rangeTheta
rR = rangeR
rZ = rangeZ = None
axes = self.GetAxes()
# find outmost range
drawObjs = axes.FindObjects(PolarLine)
# Now set the transform for the PolarLine data
for ob in drawObjs:
# Ask object what it's polar limits are
tmp = ob._GetPolarLimits()
if not tmp:
continue
tmpTheta, tmpR = tmp # in the future may use theta limits
if not tmp:
continue
tmp = ob._GetLimits()
tmpX, tmpY, tmpZ = tmp
# update min/max
if rangeR:
pass
elif tmpR and rR:
rR = Range(min(rR.min, tmpR.min), max(rR.max, tmpR.max))
elif tmpR:
rR = tmpR
if rangeZ:
pass
elif tmpZ and rZ:
rZ = Range(min(rZ.min, tmpZ.min), max(rZ.max, tmpZ.max))
            elif tmpZ:
rZ = tmpZ
# default values
if rR is None:
rR = Range(-1, 1)
if rZ is None:
rZ = Range(0, 1)
self._radialRange = rR
# apply margins
if margin:
# x
tmp = rR.range * margin
if tmp == 0:
tmp = margin
adjDim = rR.range + tmp
rX = Range(-adjDim, adjDim)
rY = Range(-adjDim, adjDim)
# z
tmp = rZ.range * margin
if tmp == 0:
tmp = margin
rZ = Range(rZ.min - tmp, rZ.max + tmp)
# apply to each camera
for cam in axes._cameras.values():
cam.SetLimits(rX, rY, rZ)
def GetLimits(self):
""" GetLimits()
Get the limits of the polar axis as displayed now.
Returns a tuple of limits for theta and r, respectively.
"""
return self._angularRange, self._radialRange
@PropWithDraw
def angularRefPos():
""" Get/Set the angular reference position in
degrees wrt +x screen axis.
"""
# internal store in radians to avoid constant conversions
def fget(self):
return 180.0 * self._angularRefPos / np.pi
def fset(self, value):
self._angularRefPos = np.pi * int(value) / 180
self.Draw()
return locals()
@PropWithDraw
def isCW():
""" Get/Set the sense of rotation.
"""
def fget(self):
return (self._sense == 1)
def fset(self, value):
if isinstance(value, bool):
if value:
self._sense = 1.0
else:
self._sense = -1.0
self.Draw()
else:
raise Exception("isCW can only be assigned " +
"by a bool (True or False)")
return locals()
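# Illustrative usage sketch (not part of the original visvis source): how a
# PolarAxis2D might be configured once it is attached to an Axes.  The
# `axes.axis` attribute is an assumption based on how the cartesian axes are
# exposed.
def _example_configure_polar_axis(axes):
    axis = axes.axis
    if isinstance(axis, PolarAxis2D):
        axis.SetLimits(rangeTheta=0, rangeR=Range(0, 5))
        axis.angularRefPos = 90   # put 0 degrees at the top of the plot
        axis.isCW = False         # counter-clockwise sense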
| bsd-3-clause | 7,639,935,343,637,107,000 | 34.707522 | 84 | 0.521047 | false |
suquant/django-vimeo | django_vimeo/templatetags/django_vimeo_tags.py | 1 | 5109 | import logging
import re
import requests
from django.template import Library, Node
from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
from ..exceptions import VideoDoesntExistException
register = Library()
logger = logging.getLogger(__name__)
@register.tag('vimeo')
class VimeoNode(Node):
"""
Template tag ``vimeo``. It gives access to all
Keys: https://developer.vimeo.com/apis/oembed#arguments
Usage (shortcut):
.. code-block:: html+django
{% vimeo instance.vimeo_uri [key1=value1, key2=value2...] %}
Or as a block:
.. code-block:: html+django
{% vimeo instance.vimeo_uri [key1=value1, key2=value2...] as VAR %}
...
        {% endvimeo %}
Examples:
.. code-block:: html+django
{% vimeo instance.vimeo_uri %}
{% vimeo instance.vimeo_uri width=600 %}
{% vimeo instance.vimeo_uri autoplay=True loop=True as my_video %}
HTML: {{ my_video.html }}
Thumbnail: {{ my_video.thumbnail_url }}
        {% endvimeo %}
"""
    error_msg = 'Syntax error. Expected: ``{% vimeo instance.vimeo_uri ' \
'[key1=val1 key2=val2 ...] [as var] %}``'
re_option = re.compile(r'^(?P<key>[\w]+)=(?P<value>.+)$')
def __init__(self, parser, token):
"""
:param parser: Django template parser
:type parser: django.template.base.Parser
:param token: Django template token
:type token: django.template.base.Token
"""
self.parser = parser
self.bits = list(token.split_contents())
self.tag_name = str(self.pop_bit())
self.video = self.pop_bit()
if len(self.bits) > 1 and self.bits[-2] == 'as':
del self.bits[-2]
self.variable_name = str(self.pop_bit(-1))
self.nodelist_file = parser.parse(('end' + self.tag_name, ))
parser.delete_first_token()
else:
self.variable_name = None
self.options = self.parse_options(self.bits)
def pop_bit(self, index=0):
return self.parser.compile_filter(self.bits.pop(index))
def parse_options(self, bits):
options = {}
for bit in bits:
parsed_bit = self.re_option.match(bit)
key = smart_str(parsed_bit.group('key'))
value = self.parser.compile_filter(parsed_bit.group('value'))
options[key] = value
return options
def render(self, context):
"""
Returns generated HTML.
:param context: Django template RequestContext
:type context: django.template.RequestContext
:return: Rendered HTML with embed video.
:rtype: django.utils.safestring.SafeText | str
"""
video = self.video.resolve(context)
options = self.resolve_options(context)
try:
if not self.variable_name:
return self.embed(video, context=context, **options)
video_meta = video.meta
width, height = options.get('width'), options.get('height')
video_meta.update(
{'optimal_file': video.get_optimal_file(width, height),
'optimal_picture': video.get_optimal_picture(width, height),
'optimal_download': video.get_optimal_download(width, height),
'oembed': video.get_oembed(**options)})
return self.render_block(context, video_meta)
except requests.Timeout:
logger.exception('Timeout reached during rendering embed video (`{0}`)'.format(video))
except VideoDoesntExistException:
logger.exception('Attempt to render not existing video (`{0}`)'.format(video))
return ''
def resolve_options(self, context):
"""
:param context: Django template RequestContext
:type context: django.template.RequestContext
"""
options = {}
for key in self.options:
value = self.options[key]
options[key] = value.resolve(context)
return options
def render_block(self, context, data):
"""
:param context: Django template RequestContext
:type context: django.template.RequestContext
:param backend: Given instance inherited from VideoBackend
:type backend: VideoBackend
:rtype: django.utils.safestring.SafeText
"""
context.push()
context[self.variable_name] = data
output = self.nodelist_file.render(context)
context.pop()
return output
@classmethod
def embed(cls, video, context=None, **options):
"""
Direct render of embed video.
:param video: video
:type video: VimeoFieldFile
:param context: Django template RequestContext
:type context: django.template.RequestContext | None
"""
return mark_safe(video.get_embed_code(**options))
def __iter__(self):
for node in self.nodelist_file:
yield node
def __repr__(self):
return '<VimeoNode "%s">' % self.url | gpl-2.0 | 2,138,548,051,289,716,200 | 33.761905 | 98 | 0.592288 | false |
nash-x/hws | nova/huawei/virt/ironic/rpcapi.py | 1 | 1480 | # -*- encoding: utf-8 -*-
from sys import argv
from oslo.config import cfg
from oslo import messaging
CONF=cfg.CONF
class ironicAgentApi(object):
def __init__(self):
transport = messaging.get_transport(CONF)
target = messaging.Target(topic='ironic-agent')
self.rpc_client = messaging.RPCClient(transport, target)
def get_volume_connector(self, host=None, kwargs=None):
ctxt = self.rpc_client.prepare(server=host, version='1.0', retry=3)
return ctxt.call({}, "get_volume_connector", **kwargs)
def attach_volume(self, host=None, kwargs=None):
ctxt = self.rpc_client.prepare(server=host, version='1.0', retry=3)
return ctxt.call({}, "attach_volume", **kwargs)
def detach_volume(self, host=None, kwargs=None):
ctxt = self.rpc_client.prepare(server=host, version='1.0', retry=3)
return ctxt.call({}, "detach_volume", **kwargs)
def attach_interface(self, host=None, kwargs=None):
ctxt = self.rpc_client.prepare(server=host, version='1.0')
return ctxt.call({}, "attach_interface", **kwargs)
def detach_interface(self, host=None, kwargs=None):
ctxt = self.rpc_client.prepare(server=host, version='1.0')
return ctxt.call({}, "detach_interface", **kwargs)
def clean_local_disk(self, host=None, kwargs=None):
ctxt = self.rpc_client.prepare(server=host, version='1.0')
return ctxt.call({}, "clean_local_disk", **kwargs) | apache-2.0 | 1,559,449,046,261,862,700 | 39.027027 | 75 | 0.646622 | false |
Teagan42/home-assistant | homeassistant/components/trafikverket_weatherstation/sensor.py | 4 | 5220 | """Weather information for air and road temperature (by Trafikverket)."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
from pytrafikverket.trafikverket_weather import TrafikverketWeather
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Trafikverket"
ATTR_MEASURE_TIME = "measure_time"
ATTR_ACTIVE = "active"
CONF_STATION = "station"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
SCAN_INTERVAL = timedelta(seconds=300)
SENSOR_TYPES = {
"air_temp": [
"Air temperature",
TEMP_CELSIUS,
"air_temp",
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"road_temp": [
"Road temperature",
TEMP_CELSIUS,
"road_temp",
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"precipitation": [
"Precipitation type",
None,
"precipitationtype",
"mdi:weather-snowy-rainy",
None,
],
"wind_direction": [
"Wind direction",
"°",
"winddirection",
"mdi:flag-triangle",
None,
],
"wind_direction_text": [
"Wind direction text",
None,
"winddirectiontext",
"mdi:flag-triangle",
None,
],
"wind_speed": ["Wind speed", "m/s", "windforce", "mdi:weather-windy", None],
"humidity": [
"Humidity",
"%",
"humidity",
"mdi:water-percent",
DEVICE_CLASS_HUMIDITY,
],
"precipitation_amount": [
"Precipitation amount",
"mm",
"precipitation_amount",
"mdi:cup-water",
None,
],
"precipitation_amountname": [
"Precipitation name",
None,
"precipitation_amountname",
"mdi:weather-pouring",
None,
],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_STATION): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): [vol.In(SENSOR_TYPES)],
}
)
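# Illustrative sketch (not part of the original component): an example
# configuration validated against PLATFORM_SCHEMA above.  The API key and
# station name are placeholders, not real values.
def _example_platform_config():
    return PLATFORM_SCHEMA({
        'platform': 'trafikverket_weatherstation',
        'name': 'Road weather',
        'api_key': 'YOUR_TRAFIKVERKET_API_KEY',
        'station': 'EXAMPLE_STATION',
        'monitored_conditions': ['air_temp', 'road_temp'],
    })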
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Trafikverket sensor platform."""
sensor_name = config[CONF_NAME]
sensor_api = config[CONF_API_KEY]
sensor_station = config[CONF_STATION]
web_session = async_get_clientsession(hass)
weather_api = TrafikverketWeather(web_session, sensor_api)
dev = []
for condition in config[CONF_MONITORED_CONDITIONS]:
dev.append(
TrafikverketWeatherStation(
weather_api, sensor_name, condition, sensor_station
)
)
if dev:
async_add_entities(dev, True)
class TrafikverketWeatherStation(Entity):
"""Representation of a Trafikverket sensor."""
def __init__(self, weather_api, name, sensor_type, sensor_station):
"""Initialize the sensor."""
self._client = name
self._name = SENSOR_TYPES[sensor_type][0]
self._type = sensor_type
self._state = None
self._unit = SENSOR_TYPES[sensor_type][1]
self._station = sensor_station
self._weather_api = weather_api
self._icon = SENSOR_TYPES[sensor_type][3]
self._device_class = SENSOR_TYPES[sensor_type][4]
self._weather = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._client} {self._name}"
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes of Trafikverket Weatherstation."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_ACTIVE: self._weather.active,
ATTR_MEASURE_TIME: self._weather.measure_time,
}
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Trafikverket and updates the states."""
try:
self._weather = await self._weather_api.async_get_weather(self._station)
self._state = getattr(self._weather, SENSOR_TYPES[self._type][2])
except (asyncio.TimeoutError, aiohttp.ClientError, ValueError) as error:
_LOGGER.error("Could not fetch weather data: %s", error)
| apache-2.0 | -7,280,607,483,734,799,000 | 26.760638 | 86 | 0.617743 | false |
nagyistoce/edx-platform | lms/djangoapps/ccx/overrides.py | 33 | 5443 | """
API related to providing field overrides for individual students. This is used
by the individual custom courses feature.
"""
import json
import logging
from django.db import transaction, IntegrityError
import request_cache
from courseware.field_overrides import FieldOverrideProvider # pylint: disable=import-error
from opaque_keys.edx.keys import CourseKey, UsageKey
from ccx_keys.locator import CCXLocator, CCXBlockUsageLocator
from .models import CcxFieldOverride, CustomCourseForEdX
log = logging.getLogger(__name__)
class CustomCoursesForEdxOverrideProvider(FieldOverrideProvider):
"""
A concrete implementation of
:class:`~courseware.field_overrides.FieldOverrideProvider` which allows for
overrides to be made on a per user basis.
"""
def get(self, block, name, default):
"""
Just call the get_override_for_ccx method if there is a ccx
"""
# The incoming block might be a CourseKey instance of some type, a
# UsageKey instance of some type, or it might be something that has a
# location attribute. That location attribute will be a UsageKey
ccx = course_key = None
identifier = getattr(block, 'id', None)
if isinstance(identifier, CourseKey):
course_key = block.id
elif isinstance(identifier, UsageKey):
course_key = block.id.course_key
elif hasattr(block, 'location'):
course_key = block.location.course_key
else:
msg = "Unable to get course id when calculating ccx overide for block type %r"
log.error(msg, type(block))
if course_key is not None:
ccx = get_current_ccx(course_key)
if ccx:
return get_override_for_ccx(ccx, block, name, default)
return default
@classmethod
def enabled_for(cls, course):
"""CCX field overrides are enabled per-course
protect against missing attributes
"""
return getattr(course, 'enable_ccx', False)
def get_current_ccx(course_key):
"""
Return the ccx that is active for this course.
course_key is expected to be an instance of an opaque CourseKey, a
ValueError is raised if this expectation is not met.
"""
if not isinstance(course_key, CourseKey):
raise ValueError("get_current_ccx requires a CourseKey instance")
if not isinstance(course_key, CCXLocator):
return None
ccx_cache = request_cache.get_cache('ccx')
if course_key not in ccx_cache:
ccx_cache[course_key] = CustomCourseForEdX.objects.get(pk=course_key.ccx)
return ccx_cache[course_key]
def get_override_for_ccx(ccx, block, name, default=None):
"""
Gets the value of the overridden field for the `ccx`. `block` and `name`
specify the block and the name of the field. If the field is not
overridden for the given ccx, returns `default`.
"""
overrides = _get_overrides_for_ccx(ccx)
if isinstance(block.location, CCXBlockUsageLocator):
non_ccx_key = block.location.to_block_locator()
else:
non_ccx_key = block.location
block_overrides = overrides.get(non_ccx_key, {})
if name in block_overrides:
return block.fields[name].from_json(block_overrides[name])
else:
return default
def _get_overrides_for_ccx(ccx):
"""
    Returns a dictionary mapping field name to overridden value for any
overrides set on this block for this CCX.
"""
overrides_cache = request_cache.get_cache('ccx-overrides')
if ccx not in overrides_cache:
overrides = {}
query = CcxFieldOverride.objects.filter(
ccx=ccx,
)
for override in query:
block_overrides = overrides.setdefault(override.location, {})
block_overrides[override.field] = json.loads(override.value)
overrides_cache[ccx] = overrides
return overrides_cache[ccx]
@transaction.commit_on_success
def override_field_for_ccx(ccx, block, name, value):
"""
Overrides a field for the `ccx`. `block` and `name` specify the block
and the name of the field on that block to override. `value` is the
value to set for the given field.
"""
field = block.fields[name]
value_json = field.to_json(value)
serialized_value = json.dumps(value_json)
try:
override = CcxFieldOverride.objects.create(
ccx=ccx,
location=block.location,
field=name,
value=serialized_value
)
except IntegrityError:
transaction.commit()
override = CcxFieldOverride.objects.get(
ccx=ccx,
location=block.location,
field=name
)
override.value = serialized_value
override.save()
_get_overrides_for_ccx(ccx).setdefault(block.location, {})[name] = value_json
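# Illustrative sketch (not part of the original module): setting an override
# and reading it back.  `ccx` is assumed to be a CustomCourseForEdX instance
# and `block` an XBlock exposing `location` and `fields`, as used above; the
# 'due' field name is only an example.
def _example_override_roundtrip(ccx, block, new_due_date):
    override_field_for_ccx(ccx, block, 'due', new_due_date)
    return get_override_for_ccx(ccx, block, 'due')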
def clear_override_for_ccx(ccx, block, name):
"""
Clears a previously set field override for the `ccx`. `block` and `name`
specify the block and the name of the field on that block to clear.
    This function is idempotent--if no override is set, no action is
performed.
"""
try:
CcxFieldOverride.objects.get(
ccx=ccx,
location=block.location,
field=name).delete()
_get_overrides_for_ccx(ccx).setdefault(block.location, {}).pop(name)
except CcxFieldOverride.DoesNotExist:
pass
| agpl-3.0 | -3,629,860,360,964,224,500 | 31.39881 | 92 | 0.656991 | false |
AllanYangZhou/oppia | extensions/interactions/Continue/Continue.py | 1 | 1632 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.interactions import base
class Continue(base.BaseInteraction):
"""Interaction that takes the form of a simple 'Continue' button."""
name = 'Continue Button'
description = 'A simple \'go to next state\' button.'
display_mode = base.DISPLAY_MODE_INLINE
_dependency_ids = []
is_linear = True
instructions = None
narrow_instructions = None
needs_summary = False
default_outcome_heading = 'When the button is clicked'
# Linear interactions are not supposed to have a solution.
can_have_solution = False
# The Continue button is added to the progress nav, but is handled
# separately from the generic Submit button because the text on it can
# change depending on the customization args.
show_generic_submit_button = False
_customization_arg_specs = [{
'name': 'buttonText',
'description': 'Button label',
'schema': {
'type': 'unicode',
},
'default_value': 'Continue',
}]
| apache-2.0 | 892,074,680,913,916,200 | 34.478261 | 74 | 0.69424 | false |
tyrelsouza/glue | tests.py | 1 | 1157 | import unittest
from glue import glue
class GlueTests(unittest.TestCase):
def test_tuple(self):
output = glue("why", "would", "you", "run", "these", "tests")
assert output == "why would you run these tests"
def test_tuple_and_named(self):
output = glue("why", "would", "you", "run", "these", "tests", who="bozo")
assert output == "why would you run these tests bozo"
assert "who" not in output
def test_named(self):
output = glue(what="bozo", who="clown")
assert output == "bozo clown"
assert "what" not in output
    def test_flatten(self):
output = glue("why", ["would", ["you", "run"], "these"], "tests")
assert output == "why would you run these tests"
def test_objects(self):
output = glue("why", ["would", ["you", "run"], "these"], "tests", 12345)
assert output == "why would you run these tests 12345"
output = glue("why", ["would", ["you", "run"], "these"], "tests", False)
assert output == "why would you run these tests False"
if __name__ == '__main__':
unittest.main()
| mit | 2,736,788,459,373,340,000 | 35.15625 | 83 | 0.556612 | false |
lgp171188/fjord | vendor/packages/requests-2.7.0/test_requests.py | 6 | 56192 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Requests."""
from __future__ import division
import json
import os
import pickle
import unittest
import collections
import io
import requests
import pytest
from requests.adapters import HTTPAdapter
from requests.auth import HTTPDigestAuth, _basic_auth_str
from requests.compat import (
Morsel, cookielib, getproxies, str, urljoin, urlparse, is_py3, builtin_str)
from requests.cookies import cookiejar_from_dict, morsel_to_cookie
from requests.exceptions import (ConnectionError, ConnectTimeout,
InvalidSchema, InvalidURL, MissingSchema,
ReadTimeout, Timeout, RetryError)
from requests.models import PreparedRequest
from requests.structures import CaseInsensitiveDict
from requests.sessions import SessionRedirectMixin
from requests.models import urlencode
from requests.hooks import default_hooks
try:
import StringIO
except ImportError:
import io as StringIO
if is_py3:
def u(s):
return s
else:
def u(s):
return s.decode('unicode-escape')
# Requests to this URL should always fail with a connection timeout (nothing
# listening on that port)
TARPIT = 'http://10.255.255.1'
HTTPBIN = os.environ.get('HTTPBIN_URL', 'http://httpbin.org/')
# Issue #1483: Make sure the URL always has a trailing slash
HTTPBIN = HTTPBIN.rstrip('/') + '/'
def httpbin(*suffix):
"""Returns url for HTTPBIN resource."""
return urljoin(HTTPBIN, '/'.join(suffix))
class RequestsTestCase(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
"""Create simple data set with headers."""
pass
def tearDown(self):
"""Teardown."""
pass
def test_entry_points(self):
requests.session
requests.session().get
requests.session().head
requests.get
requests.head
requests.put
requests.patch
requests.post
def test_invalid_url(self):
with pytest.raises(MissingSchema):
requests.get('hiwpefhipowhefopw')
with pytest.raises(InvalidSchema):
requests.get('localhost:3128')
with pytest.raises(InvalidSchema):
requests.get('localhost.localdomain:3128/')
with pytest.raises(InvalidSchema):
requests.get('10.122.1.1:3128/')
with pytest.raises(InvalidURL):
requests.get('http://')
def test_basic_building(self):
req = requests.Request()
req.url = 'http://kennethreitz.org/'
req.data = {'life': '42'}
pr = req.prepare()
assert pr.url == req.url
assert pr.body == 'life=42'
def test_no_content_length(self):
get_req = requests.Request('GET', httpbin('get')).prepare()
assert 'Content-Length' not in get_req.headers
head_req = requests.Request('HEAD', httpbin('head')).prepare()
assert 'Content-Length' not in head_req.headers
def test_override_content_length(self):
headers = {
'Content-Length': 'not zero'
}
r = requests.Request('POST', httpbin('post'), headers=headers).prepare()
assert 'Content-Length' in r.headers
assert r.headers['Content-Length'] == 'not zero'
def test_path_is_not_double_encoded(self):
request = requests.Request('GET', 'http://0.0.0.0/get/test case').prepare()
assert request.path_url == '/get/test%20case'
def test_params_are_added_before_fragment(self):
request = requests.Request('GET',
'http://example.com/path#fragment', params={'a': 'b'}).prepare()
assert request.url == 'http://example.com/path?a=b#fragment'
request = requests.Request('GET',
'http://example.com/path?key=value#fragment', params={'a': 'b'}).prepare()
assert request.url == 'http://example.com/path?key=value&a=b#fragment'
def test_mixed_case_scheme_acceptable(self):
s = requests.Session()
s.proxies = getproxies()
parts = urlparse(httpbin('get'))
schemes = ['http://', 'HTTP://', 'hTTp://', 'HttP://',
'https://', 'HTTPS://', 'hTTps://', 'HttPs://']
for scheme in schemes:
url = scheme + parts.netloc + parts.path
r = requests.Request('GET', url)
r = s.send(r.prepare())
assert r.status_code == 200, 'failed for scheme {0}'.format(scheme)
def test_HTTP_200_OK_GET_ALTERNATIVE(self):
r = requests.Request('GET', httpbin('get'))
s = requests.Session()
s.proxies = getproxies()
r = s.send(r.prepare())
assert r.status_code == 200
def test_HTTP_302_ALLOW_REDIRECT_GET(self):
r = requests.get(httpbin('redirect', '1'))
assert r.status_code == 200
assert r.history[0].status_code == 302
assert r.history[0].is_redirect
# def test_HTTP_302_ALLOW_REDIRECT_POST(self):
# r = requests.post(httpbin('status', '302'), data={'some': 'data'})
# self.assertEqual(r.status_code, 200)
def test_HTTP_200_OK_GET_WITH_PARAMS(self):
heads = {'User-agent': 'Mozilla/5.0'}
r = requests.get(httpbin('user-agent'), headers=heads)
assert heads['User-agent'] in r.text
assert r.status_code == 200
def test_HTTP_200_OK_GET_WITH_MIXED_PARAMS(self):
heads = {'User-agent': 'Mozilla/5.0'}
r = requests.get(httpbin('get') + '?test=true', params={'q': 'test'}, headers=heads)
assert r.status_code == 200
def test_set_cookie_on_301(self):
s = requests.session()
url = httpbin('cookies/set?foo=bar')
s.get(url)
assert s.cookies['foo'] == 'bar'
def test_cookie_sent_on_redirect(self):
s = requests.session()
s.get(httpbin('cookies/set?foo=bar'))
r = s.get(httpbin('redirect/1')) # redirects to httpbin('get')
assert 'Cookie' in r.json()['headers']
def test_cookie_removed_on_expire(self):
s = requests.session()
s.get(httpbin('cookies/set?foo=bar'))
assert s.cookies['foo'] == 'bar'
s.get(
httpbin('response-headers'),
params={
'Set-Cookie':
'foo=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT'
}
)
assert 'foo' not in s.cookies
def test_cookie_quote_wrapped(self):
s = requests.session()
s.get(httpbin('cookies/set?foo="bar:baz"'))
assert s.cookies['foo'] == '"bar:baz"'
def test_cookie_persists_via_api(self):
s = requests.session()
r = s.get(httpbin('redirect/1'), cookies={'foo': 'bar'})
assert 'foo' in r.request.headers['Cookie']
assert 'foo' in r.history[0].request.headers['Cookie']
def test_request_cookie_overrides_session_cookie(self):
s = requests.session()
s.cookies['foo'] = 'bar'
r = s.get(httpbin('cookies'), cookies={'foo': 'baz'})
assert r.json()['cookies']['foo'] == 'baz'
# Session cookie should not be modified
assert s.cookies['foo'] == 'bar'
def test_request_cookies_not_persisted(self):
s = requests.session()
s.get(httpbin('cookies'), cookies={'foo': 'baz'})
# Sending a request with cookies should not add cookies to the session
assert not s.cookies
def test_generic_cookiejar_works(self):
cj = cookielib.CookieJar()
cookiejar_from_dict({'foo': 'bar'}, cj)
s = requests.session()
s.cookies = cj
r = s.get(httpbin('cookies'))
# Make sure the cookie was sent
assert r.json()['cookies']['foo'] == 'bar'
# Make sure the session cj is still the custom one
assert s.cookies is cj
def test_param_cookiejar_works(self):
cj = cookielib.CookieJar()
cookiejar_from_dict({'foo': 'bar'}, cj)
s = requests.session()
r = s.get(httpbin('cookies'), cookies=cj)
# Make sure the cookie was sent
assert r.json()['cookies']['foo'] == 'bar'
def test_requests_in_history_are_not_overridden(self):
resp = requests.get(httpbin('redirect/3'))
urls = [r.url for r in resp.history]
req_urls = [r.request.url for r in resp.history]
assert urls == req_urls
def test_history_is_always_a_list(self):
"""
Show that even with redirects, Response.history is always a list.
"""
resp = requests.get(httpbin('get'))
assert isinstance(resp.history, list)
resp = requests.get(httpbin('redirect/1'))
assert isinstance(resp.history, list)
assert not isinstance(resp.history, tuple)
def test_headers_on_session_with_None_are_not_sent(self):
"""Do not send headers in Session.headers with None values."""
ses = requests.Session()
ses.headers['Accept-Encoding'] = None
req = requests.Request('GET', httpbin('get'))
prep = ses.prepare_request(req)
assert 'Accept-Encoding' not in prep.headers
def test_user_agent_transfers(self):
heads = {
'User-agent': 'Mozilla/5.0 (github.com/kennethreitz/requests)'
}
r = requests.get(httpbin('user-agent'), headers=heads)
assert heads['User-agent'] in r.text
heads = {
'user-agent': 'Mozilla/5.0 (github.com/kennethreitz/requests)'
}
r = requests.get(httpbin('user-agent'), headers=heads)
assert heads['user-agent'] in r.text
def test_HTTP_200_OK_HEAD(self):
r = requests.head(httpbin('get'))
assert r.status_code == 200
def test_HTTP_200_OK_PUT(self):
r = requests.put(httpbin('put'))
assert r.status_code == 200
def test_BASICAUTH_TUPLE_HTTP_200_OK_GET(self):
auth = ('user', 'pass')
url = httpbin('basic-auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert r.status_code == 200
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = auth
r = s.get(url)
assert r.status_code == 200
def test_connection_error_invalid_domain(self):
"""Connecting to an unknown domain should raise a ConnectionError"""
with pytest.raises(ConnectionError):
requests.get('http://doesnotexist.google.com')
def test_connection_error_invalid_port(self):
"""Connecting to an invalid port should raise a ConnectionError"""
with pytest.raises(ConnectionError):
requests.get('http://httpbin.org:1', timeout=1)
def test_LocationParseError(self):
"""Inputing a URL that cannot be parsed should raise an InvalidURL error"""
with pytest.raises(InvalidURL):
requests.get('http://fe80::5054:ff:fe5a:fc0')
def test_basicauth_with_netrc(self):
auth = ('user', 'pass')
wrong_auth = ('wronguser', 'wrongpass')
url = httpbin('basic-auth', 'user', 'pass')
def get_netrc_auth_mock(url):
return auth
requests.sessions.get_netrc_auth = get_netrc_auth_mock
# Should use netrc and work.
r = requests.get(url)
assert r.status_code == 200
# Given auth should override and fail.
r = requests.get(url, auth=wrong_auth)
assert r.status_code == 401
s = requests.session()
# Should use netrc and work.
r = s.get(url)
assert r.status_code == 200
# Given auth should override and fail.
s.auth = wrong_auth
r = s.get(url)
assert r.status_code == 401
def test_DIGEST_HTTP_200_OK_GET(self):
auth = HTTPDigestAuth('user', 'pass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert r.status_code == 200
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = HTTPDigestAuth('user', 'pass')
r = s.get(url)
assert r.status_code == 200
def test_DIGEST_AUTH_RETURNS_COOKIE(self):
url = httpbin('digest-auth', 'auth', 'user', 'pass')
auth = HTTPDigestAuth('user', 'pass')
r = requests.get(url)
assert r.cookies['fake'] == 'fake_value'
r = requests.get(url, auth=auth)
assert r.status_code == 200
def test_DIGEST_AUTH_SETS_SESSION_COOKIES(self):
url = httpbin('digest-auth', 'auth', 'user', 'pass')
auth = HTTPDigestAuth('user', 'pass')
s = requests.Session()
s.get(url, auth=auth)
assert s.cookies['fake'] == 'fake_value'
def test_DIGEST_STREAM(self):
auth = HTTPDigestAuth('user', 'pass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth, stream=True)
assert r.raw.read() != b''
r = requests.get(url, auth=auth, stream=False)
assert r.raw.read() == b''
def test_DIGESTAUTH_WRONG_HTTP_401_GET(self):
auth = HTTPDigestAuth('user', 'wrongpass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert r.status_code == 401
r = requests.get(url)
assert r.status_code == 401
s = requests.session()
s.auth = auth
r = s.get(url)
assert r.status_code == 401
def test_DIGESTAUTH_QUOTES_QOP_VALUE(self):
auth = HTTPDigestAuth('user', 'pass')
url = httpbin('digest-auth', 'auth', 'user', 'pass')
r = requests.get(url, auth=auth)
assert '"auth"' in r.request.headers['Authorization']
def test_POSTBIN_GET_POST_FILES(self):
url = httpbin('post')
post1 = requests.post(url).raise_for_status()
post1 = requests.post(url, data={'some': 'data'})
assert post1.status_code == 200
with open('requirements.txt') as f:
post2 = requests.post(url, files={'some': f})
assert post2.status_code == 200
post4 = requests.post(url, data='[{"some": "json"}]')
assert post4.status_code == 200
with pytest.raises(ValueError):
requests.post(url, files=['bad file data'])
def test_POSTBIN_GET_POST_FILES_WITH_DATA(self):
url = httpbin('post')
post1 = requests.post(url).raise_for_status()
post1 = requests.post(url, data={'some': 'data'})
assert post1.status_code == 200
with open('requirements.txt') as f:
post2 = requests.post(url,
data={'some': 'data'}, files={'some': f})
assert post2.status_code == 200
post4 = requests.post(url, data='[{"some": "json"}]')
assert post4.status_code == 200
with pytest.raises(ValueError):
requests.post(url, files=['bad file data'])
def test_conflicting_post_params(self):
url = httpbin('post')
with open('requirements.txt') as f:
pytest.raises(ValueError, "requests.post(url, data='[{\"some\": \"data\"}]', files={'some': f})")
pytest.raises(ValueError, "requests.post(url, data=u('[{\"some\": \"data\"}]'), files={'some': f})")
def test_request_ok_set(self):
r = requests.get(httpbin('status', '404'))
assert not r.ok
def test_status_raising(self):
r = requests.get(httpbin('status', '404'))
with pytest.raises(requests.exceptions.HTTPError):
r.raise_for_status()
r = requests.get(httpbin('status', '500'))
assert not r.ok
def test_decompress_gzip(self):
r = requests.get(httpbin('gzip'))
r.content.decode('ascii')
def test_unicode_get(self):
url = httpbin('/get')
requests.get(url, params={'foo': 'føø'})
requests.get(url, params={'føø': 'føø'})
requests.get(url, params={'føø': 'føø'})
requests.get(url, params={'foo': 'foo'})
requests.get(httpbin('ø'), params={'foo': 'foo'})
def test_unicode_header_name(self):
requests.put(
httpbin('put'),
headers={str('Content-Type'): 'application/octet-stream'},
data='\xff') # compat.str is unicode.
def test_pyopenssl_redirect(self):
requests.get('https://httpbin.org/status/301')
def test_urlencoded_get_query_multivalued_param(self):
r = requests.get(httpbin('get'), params=dict(test=['foo', 'baz']))
assert r.status_code == 200
assert r.url == httpbin('get?test=foo&test=baz')
def test_different_encodings_dont_break_post(self):
r = requests.post(httpbin('post'),
data={'stuff': json.dumps({'a': 123})},
params={'blah': 'asdf1234'},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
def test_unicode_multipart_post(self):
r = requests.post(httpbin('post'),
data={'stuff': u('ëlïxr')},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
r = requests.post(httpbin('post'),
data={'stuff': u('ëlïxr').encode('utf-8')},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
r = requests.post(httpbin('post'),
data={'stuff': 'elixr'},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
r = requests.post(httpbin('post'),
data={'stuff': 'elixr'.encode('utf-8')},
files={'file': ('test_requests.py', open(__file__, 'rb'))})
assert r.status_code == 200
def test_unicode_multipart_post_fieldnames(self):
filename = os.path.splitext(__file__)[0] + '.py'
r = requests.Request(method='POST',
url=httpbin('post'),
data={'stuff'.encode('utf-8'): 'elixr'},
files={'file': ('test_requests.py',
open(filename, 'rb'))})
prep = r.prepare()
assert b'name="stuff"' in prep.body
assert b'name="b\'stuff\'"' not in prep.body
def test_unicode_method_name(self):
files = {'file': open('test_requests.py', 'rb')}
r = requests.request(
method=u('POST'), url=httpbin('post'), files=files)
assert r.status_code == 200
def test_custom_content_type(self):
r = requests.post(
httpbin('post'),
data={'stuff': json.dumps({'a': 123})},
files={'file1': ('test_requests.py', open(__file__, 'rb')),
'file2': ('test_requests', open(__file__, 'rb'),
'text/py-content-type')})
assert r.status_code == 200
assert b'text/py-content-type' in r.request.body
def test_hook_receives_request_arguments(self):
def hook(resp, **kwargs):
assert resp is not None
assert kwargs != {}
requests.Request('GET', HTTPBIN, hooks={'response': hook})
def test_session_hooks_are_used_with_no_request_hooks(self):
hook = lambda x, *args, **kwargs: x
s = requests.Session()
s.hooks['response'].append(hook)
r = requests.Request('GET', HTTPBIN)
prep = s.prepare_request(r)
assert prep.hooks['response'] != []
assert prep.hooks['response'] == [hook]
    def test_session_hooks_are_overridden_by_request_hooks(self):
hook1 = lambda x, *args, **kwargs: x
hook2 = lambda x, *args, **kwargs: x
assert hook1 is not hook2
s = requests.Session()
s.hooks['response'].append(hook2)
r = requests.Request('GET', HTTPBIN, hooks={'response': [hook1]})
prep = s.prepare_request(r)
assert prep.hooks['response'] == [hook1]
def test_prepared_request_hook(self):
def hook(resp, **kwargs):
resp.hook_working = True
return resp
req = requests.Request('GET', HTTPBIN, hooks={'response': hook})
prep = req.prepare()
s = requests.Session()
s.proxies = getproxies()
resp = s.send(prep)
assert hasattr(resp, 'hook_working')
def test_prepared_from_session(self):
class DummyAuth(requests.auth.AuthBase):
def __call__(self, r):
r.headers['Dummy-Auth-Test'] = 'dummy-auth-test-ok'
return r
req = requests.Request('GET', httpbin('headers'))
assert not req.auth
s = requests.Session()
s.auth = DummyAuth()
prep = s.prepare_request(req)
resp = s.send(prep)
assert resp.json()['headers'][
'Dummy-Auth-Test'] == 'dummy-auth-test-ok'
def test_prepare_request_with_bytestring_url(self):
req = requests.Request('GET', b'https://httpbin.org/')
s = requests.Session()
prep = s.prepare_request(req)
assert prep.url == 'https://httpbin.org/'
def test_links(self):
r = requests.Response()
r.headers = {
'cache-control': 'public, max-age=60, s-maxage=60',
'connection': 'keep-alive',
'content-encoding': 'gzip',
'content-type': 'application/json; charset=utf-8',
'date': 'Sat, 26 Jan 2013 16:47:56 GMT',
'etag': '"6ff6a73c0e446c1f61614769e3ceb778"',
'last-modified': 'Sat, 26 Jan 2013 16:22:39 GMT',
'link': ('<https://api.github.com/users/kennethreitz/repos?'
'page=2&per_page=10>; rel="next", <https://api.github.'
'com/users/kennethreitz/repos?page=7&per_page=10>; '
' rel="last"'),
'server': 'GitHub.com',
'status': '200 OK',
'vary': 'Accept',
'x-content-type-options': 'nosniff',
'x-github-media-type': 'github.beta',
'x-ratelimit-limit': '60',
'x-ratelimit-remaining': '57'
}
assert r.links['next']['rel'] == 'next'
def test_cookie_parameters(self):
key = 'some_cookie'
value = 'some_value'
secure = True
domain = 'test.com'
rest = {'HttpOnly': True}
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value, secure=secure, domain=domain, rest=rest)
assert len(jar) == 1
assert 'some_cookie' in jar
cookie = list(jar)[0]
assert cookie.secure == secure
assert cookie.domain == domain
assert cookie._rest['HttpOnly'] == rest['HttpOnly']
def test_cookie_as_dict_keeps_len(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
d1 = dict(jar)
d2 = dict(jar.iteritems())
d3 = dict(jar.items())
assert len(jar) == 2
assert len(d1) == 2
assert len(d2) == 2
assert len(d3) == 2
def test_cookie_as_dict_keeps_items(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
d1 = dict(jar)
d2 = dict(jar.iteritems())
d3 = dict(jar.items())
assert d1['some_cookie'] == 'some_value'
assert d2['some_cookie'] == 'some_value'
assert d3['some_cookie1'] == 'some_value1'
def test_cookie_as_dict_keys(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
keys = jar.keys()
assert keys == list(keys)
# make sure one can use keys multiple times
assert list(keys) == list(keys)
def test_cookie_as_dict_values(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
values = jar.values()
assert values == list(values)
# make sure one can use values multiple times
assert list(values) == list(values)
def test_cookie_as_dict_items(self):
key = 'some_cookie'
value = 'some_value'
key1 = 'some_cookie1'
value1 = 'some_value1'
jar = requests.cookies.RequestsCookieJar()
jar.set(key, value)
jar.set(key1, value1)
items = jar.items()
assert items == list(items)
# make sure one can use items multiple times
assert list(items) == list(items)
def test_time_elapsed_blank(self):
r = requests.get(httpbin('get'))
td = r.elapsed
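        # manual equivalent of td.total_seconds(), which is unavailable on
        # Python 2.6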
total_seconds = ((td.microseconds + (td.seconds + td.days * 24 * 3600)
* 10**6) / 10**6)
assert total_seconds > 0.0
def test_response_is_iterable(self):
r = requests.Response()
io = StringIO.StringIO('abc')
read_ = io.read
def read_mock(amt, decode_content=None):
return read_(amt)
setattr(io, 'read', read_mock)
r.raw = io
assert next(iter(r))
io.close()
def test_response_decode_unicode(self):
"""
When called with decode_unicode, Response.iter_content should always
return unicode.
"""
r = requests.Response()
r._content_consumed = True
r._content = b'the content'
r.encoding = 'ascii'
chunks = r.iter_content(decode_unicode=True)
assert all(isinstance(chunk, str) for chunk in chunks)
# also for streaming
r = requests.Response()
r.raw = io.BytesIO(b'the content')
r.encoding = 'ascii'
chunks = r.iter_content(decode_unicode=True)
assert all(isinstance(chunk, str) for chunk in chunks)
def test_request_and_response_are_pickleable(self):
r = requests.get(httpbin('get'))
# verify we can pickle the original request
assert pickle.loads(pickle.dumps(r.request))
# verify we can pickle the response and that we have access to
# the original request.
pr = pickle.loads(pickle.dumps(r))
assert r.request.url == pr.request.url
assert r.request.headers == pr.request.headers
def test_get_auth_from_url(self):
url = 'http://user:[email protected]/path?query=yes'
assert ('user', 'pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_encoded_spaces(self):
url = 'http://user:pass%[email protected]/path?query=yes'
assert ('user', 'pass pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_not_encoded_spaces(self):
url = 'http://user:pass [email protected]/path?query=yes'
assert ('user', 'pass pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_percent_chars(self):
url = 'http://user%25user:[email protected]/path?query=yes'
assert ('user%user', 'pass') == requests.utils.get_auth_from_url(url)
def test_get_auth_from_url_encoded_hashes(self):
url = 'http://user:pass%[email protected]/path?query=yes'
assert ('user', 'pass#pass') == requests.utils.get_auth_from_url(url)
def test_cannot_send_unprepared_requests(self):
r = requests.Request(url=HTTPBIN)
with pytest.raises(ValueError):
requests.Session().send(r)
def test_http_error(self):
error = requests.exceptions.HTTPError()
assert not error.response
response = requests.Response()
error = requests.exceptions.HTTPError(response=response)
assert error.response == response
error = requests.exceptions.HTTPError('message', response=response)
assert str(error) == 'message'
assert error.response == response
def test_session_pickling(self):
r = requests.Request('GET', httpbin('get'))
s = requests.Session()
s = pickle.loads(pickle.dumps(s))
s.proxies = getproxies()
r = s.send(r.prepare())
assert r.status_code == 200
def test_fixes_1329(self):
"""
Ensure that header updates are done case-insensitively.
"""
s = requests.Session()
s.headers.update({'ACCEPT': 'BOGUS'})
s.headers.update({'accept': 'application/json'})
r = s.get(httpbin('get'))
headers = r.request.headers
assert headers['accept'] == 'application/json'
assert headers['Accept'] == 'application/json'
assert headers['ACCEPT'] == 'application/json'
def test_uppercase_scheme_redirect(self):
parts = urlparse(httpbin('html'))
url = 'HTTP://' + parts.netloc + parts.path
r = requests.get(httpbin('redirect-to'), params={'url': url})
assert r.status_code == 200
assert r.url.lower() == url.lower()
def test_transport_adapter_ordering(self):
s = requests.Session()
order = ['https://', 'http://']
assert order == list(s.adapters)
s.mount('http://git', HTTPAdapter())
s.mount('http://github', HTTPAdapter())
s.mount('http://github.com', HTTPAdapter())
s.mount('http://github.com/about/', HTTPAdapter())
order = [
'http://github.com/about/',
'http://github.com',
'http://github',
'http://git',
'https://',
'http://',
]
assert order == list(s.adapters)
s.mount('http://gittip', HTTPAdapter())
s.mount('http://gittip.com', HTTPAdapter())
s.mount('http://gittip.com/about/', HTTPAdapter())
order = [
'http://github.com/about/',
'http://gittip.com/about/',
'http://github.com',
'http://gittip.com',
'http://github',
'http://gittip',
'http://git',
'https://',
'http://',
]
assert order == list(s.adapters)
s2 = requests.Session()
s2.adapters = {'http://': HTTPAdapter()}
s2.mount('https://', HTTPAdapter())
assert 'http://' in s2.adapters
assert 'https://' in s2.adapters
def test_header_remove_is_case_insensitive(self):
# From issue #1321
s = requests.Session()
s.headers['foo'] = 'bar'
r = s.get(httpbin('get'), headers={'FOO': None})
assert 'foo' not in r.request.headers
def test_params_are_merged_case_sensitive(self):
s = requests.Session()
s.params['foo'] = 'bar'
r = s.get(httpbin('get'), params={'FOO': 'bar'})
assert r.json()['args'] == {'foo': 'bar', 'FOO': 'bar'}
def test_long_authinfo_in_url(self):
url = 'http://{0}:{1}@{2}:9000/path?query#frag'.format(
'E8A3BE87-9E3F-4620-8858-95478E385B5B',
'EA770032-DA4D-4D84-8CE9-29C6D910BF1E',
'exactly-------------sixty-----------three------------characters',
)
r = requests.Request('GET', url).prepare()
assert r.url == url
def test_header_keys_are_native(self):
headers = {u('unicode'): 'blah', 'byte'.encode('ascii'): 'blah'}
r = requests.Request('GET', httpbin('get'), headers=headers)
p = r.prepare()
# This is testing that they are builtin strings. A bit weird, but there
# we go.
assert 'unicode' in p.headers.keys()
assert 'byte' in p.headers.keys()
def test_can_send_nonstring_objects_with_files(self):
data = {'a': 0.0}
files = {'b': 'foo'}
r = requests.Request('POST', httpbin('post'), data=data, files=files)
p = r.prepare()
assert 'multipart/form-data' in p.headers['Content-Type']
def test_can_send_bytes_bytearray_objects_with_files(self):
# Test bytes:
data = {'a': 'this is a string'}
files = {'b': b'foo'}
r = requests.Request('POST', httpbin('post'), data=data, files=files)
p = r.prepare()
assert 'multipart/form-data' in p.headers['Content-Type']
# Test bytearrays:
files = {'b': bytearray(b'foo')}
r = requests.Request('POST', httpbin('post'), data=data, files=files)
p = r.prepare()
assert 'multipart/form-data' in p.headers['Content-Type']
def test_can_send_file_object_with_non_string_filename(self):
f = io.BytesIO()
f.name = 2
r = requests.Request('POST', httpbin('post'), files={'f': f})
p = r.prepare()
assert 'multipart/form-data' in p.headers['Content-Type']
def test_autoset_header_values_are_native(self):
data = 'this is a string'
length = '16'
req = requests.Request('POST', httpbin('post'), data=data)
p = req.prepare()
assert p.headers['Content-Length'] == length
def test_nonhttp_schemes_dont_check_URLs(self):
test_urls = (
'data:image/gif;base64,R0lGODlhAQABAHAAACH5BAUAAAAALAAAAAABAAEAAAICRAEAOw==',
'file:///etc/passwd',
'magnet:?xt=urn:btih:be08f00302bc2d1d3cfa3af02024fa647a271431',
)
for test_url in test_urls:
req = requests.Request('GET', test_url)
preq = req.prepare()
assert test_url == preq.url
def test_auth_is_stripped_on_redirect_off_host(self):
r = requests.get(
httpbin('redirect-to'),
params={'url': 'http://www.google.co.uk'},
auth=('user', 'pass'),
)
assert r.history[0].request.headers['Authorization']
assert not r.request.headers.get('Authorization', '')
def test_auth_is_retained_for_redirect_on_host(self):
r = requests.get(httpbin('redirect/1'), auth=('user', 'pass'))
h1 = r.history[0].request.headers['Authorization']
h2 = r.request.headers['Authorization']
assert h1 == h2
def test_manual_redirect_with_partial_body_read(self):
s = requests.Session()
r1 = s.get(httpbin('redirect/2'), allow_redirects=False, stream=True)
assert r1.is_redirect
rg = s.resolve_redirects(r1, r1.request, stream=True)
# read only the first eight bytes of the response body,
# then follow the redirect
r1.iter_content(8)
r2 = next(rg)
assert r2.is_redirect
# read all of the response via iter_content,
# then follow the redirect
for _ in r2.iter_content():
pass
r3 = next(rg)
assert not r3.is_redirect
def _patch_adapter_gzipped_redirect(self, session, url):
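        # Monkeypatch the adapter's build_response so the first response it
        # builds claims a gzip content-encoding that the body does not have;
        # used by test_redirect_with_wrong_gzipped_header below.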
adapter = session.get_adapter(url=url)
org_build_response = adapter.build_response
self._patched_response = False
def build_response(*args, **kwargs):
resp = org_build_response(*args, **kwargs)
if not self._patched_response:
resp.raw.headers['content-encoding'] = 'gzip'
self._patched_response = True
return resp
adapter.build_response = build_response
def test_redirect_with_wrong_gzipped_header(self):
s = requests.Session()
url = httpbin('redirect/1')
self._patch_adapter_gzipped_redirect(s, url)
s.get(url)
def test_basic_auth_str_is_always_native(self):
s = _basic_auth_str('test', 'test')
assert isinstance(s, builtin_str)
assert s == 'Basic dGVzdDp0ZXN0'
def test_requests_history_is_saved(self):
r = requests.get(httpbin('redirect/5'))
total = r.history[-1].history
i = 0
for item in r.history:
assert item.history == total[0:i]
i = i + 1
def test_json_param_post_content_type_works(self):
r = requests.post(
httpbin('post'),
json={'life': 42}
)
assert r.status_code == 200
assert 'application/json' in r.request.headers['Content-Type']
assert {'life': 42} == r.json()['json']
def test_response_iter_lines(self):
r = requests.get(httpbin('stream/4'), stream=True)
assert r.status_code == 200
it = r.iter_lines()
next(it)
assert len(list(it)) == 3
@pytest.mark.xfail
def test_response_iter_lines_reentrant(self):
"""Response.iter_lines() is not reentrant safe"""
r = requests.get(httpbin('stream/4'), stream=True)
assert r.status_code == 200
next(r.iter_lines())
assert len(list(r.iter_lines())) == 3
class TestContentEncodingDetection(unittest.TestCase):
def test_none(self):
encodings = requests.utils.get_encodings_from_content('')
assert not len(encodings)
def test_html_charset(self):
"""HTML5 meta charset attribute"""
content = '<meta charset="UTF-8">'
encodings = requests.utils.get_encodings_from_content(content)
assert len(encodings) == 1
assert encodings[0] == 'UTF-8'
def test_html4_pragma(self):
"""HTML4 pragma directive"""
content = '<meta http-equiv="Content-type" content="text/html;charset=UTF-8">'
encodings = requests.utils.get_encodings_from_content(content)
assert len(encodings) == 1
assert encodings[0] == 'UTF-8'
def test_xhtml_pragma(self):
"""XHTML 1.x served with text/html MIME type"""
content = '<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />'
encodings = requests.utils.get_encodings_from_content(content)
assert len(encodings) == 1
assert encodings[0] == 'UTF-8'
def test_xml(self):
"""XHTML 1.x served as XML"""
content = '<?xml version="1.0" encoding="UTF-8"?>'
encodings = requests.utils.get_encodings_from_content(content)
assert len(encodings) == 1
assert encodings[0] == 'UTF-8'
def test_precedence(self):
content = '''
<?xml version="1.0" encoding="XML"?>
<meta charset="HTML5">
<meta http-equiv="Content-type" content="text/html;charset=HTML4" />
'''.strip()
encodings = requests.utils.get_encodings_from_content(content)
assert encodings == ['HTML5', 'HTML4', 'XML']
class TestCaseInsensitiveDict(unittest.TestCase):
def test_mapping_init(self):
cid = CaseInsensitiveDict({'Foo': 'foo', 'BAr': 'bar'})
assert len(cid) == 2
assert 'foo' in cid
assert 'bar' in cid
def test_iterable_init(self):
cid = CaseInsensitiveDict([('Foo', 'foo'), ('BAr', 'bar')])
assert len(cid) == 2
assert 'foo' in cid
assert 'bar' in cid
def test_kwargs_init(self):
cid = CaseInsensitiveDict(FOO='foo', BAr='bar')
assert len(cid) == 2
assert 'foo' in cid
assert 'bar' in cid
def test_docstring_example(self):
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
assert cid['aCCEPT'] == 'application/json'
assert list(cid) == ['Accept']
def test_len(self):
cid = CaseInsensitiveDict({'a': 'a', 'b': 'b'})
cid['A'] = 'a'
assert len(cid) == 2
def test_getitem(self):
cid = CaseInsensitiveDict({'Spam': 'blueval'})
assert cid['spam'] == 'blueval'
assert cid['SPAM'] == 'blueval'
def test_fixes_649(self):
"""__setitem__ should behave case-insensitively."""
cid = CaseInsensitiveDict()
cid['spam'] = 'oneval'
cid['Spam'] = 'twoval'
cid['sPAM'] = 'redval'
cid['SPAM'] = 'blueval'
assert cid['spam'] == 'blueval'
assert cid['SPAM'] == 'blueval'
assert list(cid.keys()) == ['SPAM']
def test_delitem(self):
cid = CaseInsensitiveDict()
cid['Spam'] = 'someval'
del cid['sPam']
assert 'spam' not in cid
assert len(cid) == 0
def test_contains(self):
cid = CaseInsensitiveDict()
cid['Spam'] = 'someval'
assert 'Spam' in cid
assert 'spam' in cid
assert 'SPAM' in cid
assert 'sPam' in cid
assert 'notspam' not in cid
def test_get(self):
cid = CaseInsensitiveDict()
cid['spam'] = 'oneval'
cid['SPAM'] = 'blueval'
assert cid.get('spam') == 'blueval'
assert cid.get('SPAM') == 'blueval'
assert cid.get('sPam') == 'blueval'
assert cid.get('notspam', 'default') == 'default'
def test_update(self):
cid = CaseInsensitiveDict()
cid['spam'] = 'blueval'
cid.update({'sPam': 'notblueval'})
assert cid['spam'] == 'notblueval'
cid = CaseInsensitiveDict({'Foo': 'foo', 'BAr': 'bar'})
cid.update({'fOO': 'anotherfoo', 'bAR': 'anotherbar'})
assert len(cid) == 2
assert cid['foo'] == 'anotherfoo'
assert cid['bar'] == 'anotherbar'
def test_update_retains_unchanged(self):
cid = CaseInsensitiveDict({'foo': 'foo', 'bar': 'bar'})
cid.update({'foo': 'newfoo'})
assert cid['bar'] == 'bar'
def test_iter(self):
cid = CaseInsensitiveDict({'Spam': 'spam', 'Eggs': 'eggs'})
keys = frozenset(['Spam', 'Eggs'])
assert frozenset(iter(cid)) == keys
def test_equality(self):
cid = CaseInsensitiveDict({'SPAM': 'blueval', 'Eggs': 'redval'})
othercid = CaseInsensitiveDict({'spam': 'blueval', 'eggs': 'redval'})
assert cid == othercid
del othercid['spam']
assert cid != othercid
assert cid == {'spam': 'blueval', 'eggs': 'redval'}
def test_setdefault(self):
cid = CaseInsensitiveDict({'Spam': 'blueval'})
assert cid.setdefault('spam', 'notblueval') == 'blueval'
assert cid.setdefault('notspam', 'notblueval') == 'notblueval'
def test_lower_items(self):
cid = CaseInsensitiveDict({
'Accept': 'application/json',
'user-Agent': 'requests',
})
keyset = frozenset(lowerkey for lowerkey, v in cid.lower_items())
lowerkeyset = frozenset(['accept', 'user-agent'])
assert keyset == lowerkeyset
def test_preserve_key_case(self):
cid = CaseInsensitiveDict({
'Accept': 'application/json',
'user-Agent': 'requests',
})
keyset = frozenset(['Accept', 'user-Agent'])
assert frozenset(i[0] for i in cid.items()) == keyset
assert frozenset(cid.keys()) == keyset
assert frozenset(cid) == keyset
def test_preserve_last_key_case(self):
cid = CaseInsensitiveDict({
'Accept': 'application/json',
'user-Agent': 'requests',
})
cid.update({'ACCEPT': 'application/json'})
cid['USER-AGENT'] = 'requests'
keyset = frozenset(['ACCEPT', 'USER-AGENT'])
assert frozenset(i[0] for i in cid.items()) == keyset
assert frozenset(cid.keys()) == keyset
assert frozenset(cid) == keyset
class UtilsTestCase(unittest.TestCase):
def test_super_len_io_streams(self):
""" Ensures that we properly deal with different kinds of IO streams. """
# uses StringIO or io.StringIO (see import above)
from io import BytesIO
from requests.utils import super_len
assert super_len(StringIO.StringIO()) == 0
assert super_len(
StringIO.StringIO('with so much drama in the LBC')) == 29
assert super_len(BytesIO()) == 0
assert super_len(
BytesIO(b"it's kinda hard bein' snoop d-o-double-g")) == 40
try:
import cStringIO
except ImportError:
pass
else:
assert super_len(
cStringIO.StringIO('but some how, some way...')) == 25
def test_get_environ_proxies_ip_ranges(self):
"""Ensures that IP addresses are correctly matches with ranges
in no_proxy variable."""
from requests.utils import get_environ_proxies
os.environ['no_proxy'] = '192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1'
assert get_environ_proxies('http://192.168.0.1:5000/') == {}
assert get_environ_proxies('http://192.168.0.1/') == {}
assert get_environ_proxies('http://172.16.1.1/') == {}
assert get_environ_proxies('http://172.16.1.1:5000/') == {}
assert get_environ_proxies('http://192.168.1.1:5000/') != {}
assert get_environ_proxies('http://192.168.1.1/') != {}
def test_get_environ_proxies(self):
"""Ensures that IP addresses are correctly matches with ranges
in no_proxy variable."""
from requests.utils import get_environ_proxies
os.environ['no_proxy'] = '127.0.0.1,localhost.localdomain,192.168.0.0/24,172.16.1.1'
assert get_environ_proxies(
'http://localhost.localdomain:5000/v1.0/') == {}
assert get_environ_proxies('http://www.requests.com/') != {}
def test_guess_filename_when_int(self):
from requests.utils import guess_filename
assert None is guess_filename(1)
def test_guess_filename_when_filename_is_an_int(self):
from requests.utils import guess_filename
fake = type('Fake', (object,), {'name': 1})()
assert None is guess_filename(fake)
def test_guess_filename_with_file_like_obj(self):
from requests.utils import guess_filename
from requests import compat
fake = type('Fake', (object,), {'name': b'value'})()
guessed_name = guess_filename(fake)
assert b'value' == guessed_name
assert isinstance(guessed_name, compat.bytes)
def test_guess_filename_with_unicode_name(self):
from requests.utils import guess_filename
from requests import compat
filename = b'value'.decode('utf-8')
fake = type('Fake', (object,), {'name': filename})()
guessed_name = guess_filename(fake)
assert filename == guessed_name
assert isinstance(guessed_name, compat.str)
def test_is_ipv4_address(self):
from requests.utils import is_ipv4_address
assert is_ipv4_address('8.8.8.8')
assert not is_ipv4_address('8.8.8.8.8')
assert not is_ipv4_address('localhost.localdomain')
def test_is_valid_cidr(self):
from requests.utils import is_valid_cidr
assert not is_valid_cidr('8.8.8.8')
assert is_valid_cidr('192.168.1.0/24')
def test_dotted_netmask(self):
from requests.utils import dotted_netmask
assert dotted_netmask(8) == '255.0.0.0'
assert dotted_netmask(24) == '255.255.255.0'
assert dotted_netmask(25) == '255.255.255.128'
def test_address_in_network(self):
from requests.utils import address_in_network
assert address_in_network('192.168.1.1', '192.168.1.0/24')
assert not address_in_network('172.16.0.1', '192.168.1.0/24')
def test_get_auth_from_url(self):
"""Ensures that username and password in well-encoded URI as per
RFC 3986 are correclty extracted."""
from requests.utils import get_auth_from_url
from requests.compat import quote
percent_encoding_test_chars = "%!*'();:@&=+$,/?#[] "
url_address = 'request.com/url.html#test'
url = 'http://' + quote(
percent_encoding_test_chars, '') + ':' + quote(
percent_encoding_test_chars, '') + '@' + url_address
(username, password) = get_auth_from_url(url)
assert username == percent_encoding_test_chars
assert password == percent_encoding_test_chars
def test_requote_uri_with_unquoted_percents(self):
"""Ensure we handle unquoted percent signs in redirects.
See: https://github.com/kennethreitz/requests/issues/2356
"""
from requests.utils import requote_uri
bad_uri = 'http://example.com/fiz?buz=%ppicture'
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == requote_uri(bad_uri)
def test_requote_uri_properly_requotes(self):
"""Ensure requoting doesn't break expectations."""
from requests.utils import requote_uri
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == requote_uri(quoted)
class TestMorselToCookieExpires(unittest.TestCase):
"""Tests for morsel_to_cookie when morsel contains expires."""
def test_expires_valid_str(self):
"""Test case where we convert expires from string time."""
morsel = Morsel()
morsel['expires'] = 'Thu, 01-Jan-1970 00:00:01 GMT'
cookie = morsel_to_cookie(morsel)
assert cookie.expires == 1
def test_expires_invalid_int(self):
"""Test case where an invalid type is passed for expires."""
morsel = Morsel()
morsel['expires'] = 100
with pytest.raises(TypeError):
morsel_to_cookie(morsel)
def test_expires_invalid_str(self):
"""Test case where an invalid string is input."""
morsel = Morsel()
morsel['expires'] = 'woops'
with pytest.raises(ValueError):
morsel_to_cookie(morsel)
def test_expires_none(self):
"""Test case where expires is None."""
morsel = Morsel()
morsel['expires'] = None
cookie = morsel_to_cookie(morsel)
assert cookie.expires is None
class TestMorselToCookieMaxAge(unittest.TestCase):
"""Tests for morsel_to_cookie when morsel contains max-age."""
def test_max_age_valid_int(self):
"""Test case where a valid max age in seconds is passed."""
morsel = Morsel()
morsel['max-age'] = 60
cookie = morsel_to_cookie(morsel)
assert isinstance(cookie.expires, int)
def test_max_age_invalid_str(self):
"""Test case where a invalid max age is passed."""
morsel = Morsel()
morsel['max-age'] = 'woops'
with pytest.raises(TypeError):
morsel_to_cookie(morsel)
class TestTimeout:
def test_stream_timeout(self):
try:
requests.get(httpbin('delay/10'), timeout=2.0)
except requests.exceptions.Timeout as e:
assert 'Read timed out' in e.args[0].args[0]
def test_invalid_timeout(self):
with pytest.raises(ValueError) as e:
requests.get(httpbin('get'), timeout=(3, 4, 5))
assert '(connect, read)' in str(e)
with pytest.raises(ValueError) as e:
requests.get(httpbin('get'), timeout='foo')
assert 'must be an int or float' in str(e)
def test_none_timeout(self):
""" Check that you can set None as a valid timeout value.
To actually test this behavior, we'd want to check that setting the
timeout to None actually lets the request block past the system default
timeout. However, this would make the test suite unbearably slow.
Instead we verify that setting the timeout to None does not prevent the
request from succeeding.
"""
r = requests.get(httpbin('get'), timeout=None)
assert r.status_code == 200
def test_read_timeout(self):
try:
requests.get(httpbin('delay/10'), timeout=(None, 0.1))
assert False, 'The recv() request should time out.'
except ReadTimeout:
pass
def test_connect_timeout(self):
try:
requests.get(TARPIT, timeout=(0.1, None))
assert False, 'The connect() request should time out.'
except ConnectTimeout as e:
assert isinstance(e, ConnectionError)
assert isinstance(e, Timeout)
def test_total_timeout_connect(self):
try:
requests.get(TARPIT, timeout=(0.1, 0.1))
assert False, 'The connect() request should time out.'
except ConnectTimeout:
pass
def test_encoded_methods(self):
"""See: https://github.com/kennethreitz/requests/issues/2316"""
r = requests.request(b'GET', httpbin('get'))
assert r.ok
SendCall = collections.namedtuple('SendCall', ('args', 'kwargs'))
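# Minimal Session stand-in for the redirect tests below: send() records each
# call instead of touching the network, and build_response() fabricates a
# response whose Location header points at '/' and whose status code is popped
# from order_of_redirects (falling back to 200 once the list is exhausted).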
class RedirectSession(SessionRedirectMixin):
def __init__(self, order_of_redirects):
self.redirects = order_of_redirects
self.calls = []
self.max_redirects = 30
self.cookies = {}
self.trust_env = False
def send(self, *args, **kwargs):
self.calls.append(SendCall(args, kwargs))
return self.build_response()
def build_response(self):
request = self.calls[-1].args[0]
r = requests.Response()
try:
r.status_code = int(self.redirects.pop(0))
except IndexError:
r.status_code = 200
r.headers = CaseInsensitiveDict({'Location': '/'})
r.raw = self._build_raw()
r.request = request
return r
def _build_raw(self):
string = StringIO.StringIO('')
setattr(string, 'release_conn', lambda *args: args)
return string
class TestRedirects:
default_keyword_args = {
'stream': False,
'verify': True,
'cert': None,
'timeout': None,
'allow_redirects': False,
'proxies': {},
}
def test_requests_are_updated_each_time(self):
session = RedirectSession([303, 307])
prep = requests.Request('POST', httpbin('post')).prepare()
r0 = session.send(prep)
assert r0.request.method == 'POST'
assert session.calls[-1] == SendCall((r0.request,), {})
redirect_generator = session.resolve_redirects(r0, prep)
for response in redirect_generator:
assert response.request.method == 'GET'
send_call = SendCall((response.request,),
TestRedirects.default_keyword_args)
assert session.calls[-1] == send_call
@pytest.fixture
def list_of_tuples():
return [
(('a', 'b'), ('c', 'd')),
(('c', 'd'), ('a', 'b')),
(('a', 'b'), ('c', 'd'), ('e', 'f')),
]
def test_data_argument_accepts_tuples(list_of_tuples):
"""
Ensure that the data argument will accept tuples of strings
and properly encode them.
"""
for data in list_of_tuples:
p = PreparedRequest()
p.prepare(
method='GET',
url='http://www.example.com',
data=data,
hooks=default_hooks()
)
assert p.body == urlencode(data)
def assert_copy(p, p_copy):
for attr in ('method', 'url', 'headers', '_cookies', 'body', 'hooks'):
assert getattr(p, attr) == getattr(p_copy, attr)
def test_prepared_request_empty_copy():
p = PreparedRequest()
assert_copy(p, p.copy())
def test_prepared_request_no_cookies_copy():
p = PreparedRequest()
p.prepare(
method='GET',
url='http://www.example.com',
data='foo=bar',
hooks=default_hooks()
)
assert_copy(p, p.copy())
def test_prepared_request_complete_copy():
p = PreparedRequest()
p.prepare(
method='GET',
url='http://www.example.com',
data='foo=bar',
hooks=default_hooks(),
cookies={'foo': 'bar'}
)
assert_copy(p, p.copy())
def test_prepare_unicode_url():
p = PreparedRequest()
p.prepare(
method='GET',
url=u('http://www.example.com/üniçø∂é'),
)
assert_copy(p, p.copy())
def test_urllib3_retries():
from requests.packages.urllib3.util import Retry
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=Retry(
total=2, status_forcelist=[500]
)))
with pytest.raises(RetryError):
s.get(httpbin('status/500'))
def test_vendor_aliases():
from requests.packages import urllib3
from requests.packages import chardet
with pytest.raises(ImportError):
from requests.packages import webbrowser
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,352,540,414,281,156,000 | 33.29243 | 112 | 0.577415 | false |
pschella/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_T.py | 35 | 12866 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, sign, sin, sqrt, sum,
tan, tanh, atleast_2d)
from .go_benchmark import Benchmark
class TestTubeHolder(Benchmark):
r"""
TestTubeHolder objective function.
This class defines the TestTubeHolder [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{TestTubeHolder}}(x) = - 4 \left | {e^{\left|{\cos
\left(\frac{1}{200} x_{1}^{2} + \frac{1}{200} x_{2}^{2}\right)}
\right|}\sin\left(x_{1}\right) \cos\left(x_{2}\right)}\right|
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -10.872299901558` for
:math:`x= [-\pi/2, 0]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
    TODO Jamil#148 has an incorrect equation; it is missing an abs around the
    square brackets.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-pi / 2, 0.0]]
self.fglob = -10.87229990155800
def fun(self, x, *args):
self.nfev += 1
u = sin(x[0]) * cos(x[1])
v = (x[0] ** 2 + x[1] ** 2) / 200
return -4 * abs(u * exp(abs(cos(v))))
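# Illustrative helper, not part of the original benchmark module (and assuming
# the Benchmark base class initialises nfev): evaluate a benchmark at its
# published optimum and return |f(x*) - fglob|, which should be close to zero,
# up to round-off, for e.g. TestTubeHolder above.
def _optimum_residual(benchmark_cls):
    bench = benchmark_cls()
    x_star = asarray(bench.global_optimum[0])
    return abs(bench.fun(x_star) - bench.fglob)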
class Thurber(Benchmark):
r"""
Thurber [1]_ objective function.
.. [1] http://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml
"""
def __init__(self, dimensions=7):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(
[500., 500., 100., 10., 0.1, 0.1, 0.],
[2000., 2000., 1000., 150., 2., 1., 0.2]))
self.global_optimum = [[1.288139680e3, 1.4910792535e3, 5.8323836877e2,
75.416644291, 0.96629502864, 0.39797285797,
4.9727297349e-2]]
self.fglob = 5642.7082397
self.a = asarray([80.574, 84.248, 87.264, 87.195, 89.076, 89.608,
89.868, 90.101, 92.405, 95.854, 100.696, 101.06,
401.672, 390.724, 567.534, 635.316, 733.054, 759.087,
894.206, 990.785, 1090.109, 1080.914, 1122.643,
1178.351, 1260.531, 1273.514, 1288.339, 1327.543,
1353.863, 1414.509, 1425.208, 1421.384, 1442.962,
1464.350, 1468.705, 1447.894, 1457.628])
self.b = asarray([-3.067, -2.981, -2.921, -2.912, -2.840, -2.797,
-2.702, -2.699, -2.633, -2.481, -2.363, -2.322,
-1.501, -1.460, -1.274, -1.212, -1.100, -1.046,
-0.915, -0.714, -0.566, -0.545, -0.400, -0.309,
-0.109, -0.103, 0.010, 0.119, 0.377, 0.790, 0.963,
1.006, 1.115, 1.572, 1.841, 2.047, 2.200])
def fun(self, x, *args):
self.nfev += 1
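        # NIST "Thurber" model: a cubic-over-cubic rational function of the
        # predictor values in self.b, compared against the observations in
        # self.a; the return value is the residual sum of squares.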
vec = x[0] + x[1] * self.b + x[2] * self.b ** 2 + x[3] * self.b ** 3
vec /= 1 + x[4] * self.b + x[5] * self.b ** 2 + x[6] * self.b ** 3
return sum((self.a - vec) ** 2)
class Treccani(Benchmark):
r"""
Treccani objective function.
This class defines the Treccani [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Treccani}}(x) = x_1^4 + 4x_1^3 + 4x_1^2 + x_2^2
with :math:`x_i \in
[-5, 5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [-2, 0]` or
:math:`x = [0, 0]`.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [[-2.0, 0.0]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
return x[0] ** 4 + 4.0 * x[0] ** 3 + 4.0 * x[0] ** 2 + x[1] ** 2
class Trefethen(Benchmark):
r"""
Trefethen objective function.
This class defines the Trefethen [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Trefethen}}(x) = 0.25 x_{1}^{2} + 0.25 x_{2}^{2}
+ e^{\sin\left(50 x_{1}\right)}
- \sin\left(10 x_{1} + 10 x_{2}\right)
+ \sin\left(60 e^{x_{2}}\right)
+ \sin\left[70 \sin\left(x_{1}\right)\right]
+ \sin\left[\sin\left(80 x_{2}\right)\right]
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -3.3068686474` for
:math:`x = [-0.02440307923, 0.2106124261]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [[-0.02440307923, 0.2106124261]]
self.fglob = -3.3068686474
def fun(self, x, *args):
self.nfev += 1
val = 0.25 * x[0] ** 2 + 0.25 * x[1] ** 2
val += exp(sin(50. * x[0])) - sin(10 * x[0] + 10 * x[1])
val += sin(60 * exp(x[1]))
val += sin(70 * sin(x[0]))
val += sin(sin(80 * x[1]))
return val
class ThreeHumpCamel(Benchmark):
r"""
Three Hump Camel objective function.
This class defines the Three Hump Camel [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{ThreeHumpCamel}}(x) = 2x_1^2 - 1.05x_1^4 + \frac{x_1^6}{6}
+ x_1x_2 + x_2^2
with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
self.custom_bounds = [(-2, 2), (-1.5, 1.5)]
self.global_optimum = [[0.0, 0.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (2.0 * x[0] ** 2.0 - 1.05 * x[0] ** 4.0 + x[0] ** 6 / 6.0
+ x[0] * x[1] + x[1] ** 2.0)
class Trid(Benchmark):
r"""
Trid objective function.
This class defines the Trid [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Trid}}(x) = \sum_{i=1}^{n} (x_i - 1)^2
- \sum_{i=2}^{n} x_i x_{i-1}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-20, 20]` for :math:`i = 1, ..., 6`.
*Global optimum*: :math:`f(x) = -50` for :math:`x = [6, 10, 12, 12, 10, 6]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO Jamil#150, starting index of second summation term should be 2.
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))
self.global_optimum = [[6, 10, 12, 12, 10, 6]]
self.fglob = -50.0
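        # For general n the minimiser is x_i = i * (n + 1 - i), which gives
        # f(x*) = -n * (n + 4) * (n - 1) / 6 (equal to -50 for n = 6).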
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return sum((x - 1.0) ** 2.0) - sum(x[1:] * x[:-1])
class Trigonometric01(Benchmark):
r"""
Trigonometric 1 objective function.
This class defines the Trigonometric 1 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Trigonometric01}}(x) = \sum_{i=1}^{n} \left [n -
\sum_{j=1}^{n} \cos(x_j)
+ i \left(1 - cos(x_i)
- sin(x_i) \right ) \right]^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [0, \pi]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
    TODO: equation uncertain here. Is it just supposed to be the cos term
    in the inner sum, or the whole of the second line in Jamil #153.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [pi] * self.N))
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
i = atleast_2d(arange(1.0, self.N + 1)).T
inner = cos(x) + i * (1 - cos(x) - sin(x))
return sum((self.N - sum(inner, axis=1)) ** 2)
class Trigonometric02(Benchmark):
r"""
Trigonometric 2 objective function.
This class defines the Trigonometric 2 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Trigonometric2}}(x) = 1 + \sum_{i=1}^{n} 8 \sin^2
\left[7(x_i - 0.9)^2 \right]
+ 6 \sin^2 \left[14(x_i - 0.9)^2 \right]
+ (x_i - 0.9)^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-500, 500]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 1` for :math:`x_i = 0.9` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.0] * self.N,
[500.0] * self.N))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [[0.9 for _ in range(self.N)]]
self.fglob = 1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
vec = (8 * sin(7 * (x - 0.9) ** 2) ** 2
+ 6 * sin(14 * (x - 0.9) ** 2) ** 2
+ (x - 0.9) ** 2)
return 1.0 + sum(vec)
class Tripod(Benchmark):
r"""
Tripod objective function.
This class defines the Tripod [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Tripod}}(x) = p(x_2) \left[1 + p(x_1) \right] +
\lvert x_1 + 50p(x_2) \left[1 - 2p(x_1) \right]
\rvert + \lvert x_2 + 50\left[1 - 2p(x_2)\right]
\rvert
with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, -50]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.global_optimum = [[0.0, -50.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
p1 = float(x[0] >= 0)
p2 = float(x[1] >= 0)
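        # p1 and p2 are the step function p(.) from the docstring:
        # 1 for a non-negative argument, 0 otherwise.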
return (p2 * (1.0 + p1) + abs(x[0] + 50.0 * p2 * (1.0 - 2.0 * p1))
+ abs(x[1] + 50.0 * (1.0 - 2.0 * p2)))
| bsd-3-clause | -3,311,406,547,242,495,000 | 31.737913 | 82 | 0.517177 | false |
justajeffy/anim-studio-tools | review_tool/sources/reviewTool/api/contexts/playlist.py | 5 | 7308 | ##
# \namespace reviewTool.contexts.playlist
#
# \remarks [desc::commented]
#
# \author Dr. D Studios
# \date 07/27/11
#
import datetime
import os.path
import re
import rv_tools
from .sequence import SequenceContext
from ..iosystem import IOSystem
from ..context import Context
from ..entity import Entity
from ..version import Version
from ..clip import Clip
from ...database import db
from ...kernel import core
from ...xml import XmlDocument
class PlaylistContext(Context):
def __init__( self, name, data = {} ):
super(PlaylistContext,self).__init__(name)
# set the custom properties
self._shotgunId = data.get('id')
self._createdAt = data.get('sg_date_and_time',datetime.datetime.now())
self._comments = data.get('description','')
self._department = data.get('sg_department',{}).get('name','')
self._filename = data.get('filename','')
self._lookupVersions = data.get('versions',[])
self._lookupBy = data.get('lookupBy','id')
def collectEntities( self ):
self.collectVersions()
return self.cachedValue('entities')
def collectVersions( self ):
if ( not self._lookupVersions ):
self.cache('entities',[])
self.cache('clips',[])
self.cache('versions',[])
return []
# collect all the versions who match the inputed criteria
sg_versions = []
lkey = self._lookupBy
lookup_versions = [[lkey,'is',version[lkey]] for version in self._lookupVersions]
page = 0
page_max = 10
lcount = len(lookup_versions)
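        # query shotgun in batches of page_max filters; filter_operator='any'
        # ORs each batch of filters so every matching version is returned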
while (page < lcount):
filters = lookup_versions[page:page+page_max]
fields = Version.ShotgunFields + ['entity']
# collect all the shots that are part of this scene from shotgun
sg_versions += db.session().find('Version',filters,fields,filter_operator='any')
page += page_max
# create a mapping to the versions by their code
sg_version_map = {}
for sg_version in sg_versions:
# ensure that we're only using creative renders when looking up by code
# since we can have duplicate entries
if ( lkey == 'code' and not 'creative' in sg_version['sg_tank_address'].lower() ):
continue
sg_version_map[sg_version[lkey]] = sg_version
entities = []
versions = []
# extract the entity and version information from the clip versions
for i, lookup_version in enumerate(self._lookupVersions):
# pull the looked up version based on the key
sg_version = sg_version_map.get(lookup_version[lkey])
if ( not sg_version ):
continue
# retrieve the entity
sg_entity = sg_version['entity']
# create the entity instance
entity = Entity( self, sg_entity['type'], sg_entity['name'], sg_entity['id'] )
sortOrder = sg_entity.get('sg_cut_order')
if ( sortOrder == None ):
sortOrder = SequenceContext.generateSortOrder(entity.name())
# create the version instance
lookup_version.update(sg_version)
version = Version( entity, lookup_version )
version.setActive(True)
version.setDefaultPlaylistOrder(i)
# cache the values
entities.append(entity)
versions.append(version)
self.cache('entities',entities)
return versions
def comments( self ):
return self._comments
def createdAt( self ):
return self._createdAt
def department( self ):
return self._department
def filename( self ):
return self._filename
def setShotgunId( self, shotgunId ):
self._shotgunId = shotgunId
def shotgunId( self ):
return self._shotgunId
@staticmethod
def fromFile( filename ):
filename = str(filename)
# extract the system to be used for loading the inputed file
ftype = os.path.splitext(filename)[1]
system = IOSystem.findByType(ftype)
if ( not system ):
return None
# load the context from the system
return system.load(filename)
@staticmethod
def fromXml( name, xml, filename = '' ):
"""
Creates a new PlaylistContext instance with the given name by
loading all relevant data from the XML file
:param name:
:type <str>:
:param xml:
:type <XmlElement>:
:param filename:
:type <str>:
:return <PlaylistContext>:
"""
# create the new playlist context
output = PlaylistContext( name, {'filename': filename })
# load the clip data
xclips = xml.findChild('clips')
if ( xclips ):
entities = []
clips = []
versions = []
# load the clip data from the xml file
for xclip in xclips.children():
clip = Clip.fromXml(xclip)
if ( not clip ):
continue
# store the version
version = clip.version()
version.setActive(True)
# store the entity
entity = clip.entity()
entity.setContext(output)
# store the clip
clips.append(clip)
versions.append(version)
entities.append(entity)
# store the defaults
version.setDefaultOptions( clip )
output.cache( 'entities', entities )
output.cache( 'clips', clips )
output.cache( 'versions', versions )
return output
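# Illustrative usage (editor's sketch; the path below is hypothetical):
#
#     context = PlaylistContext.fromFile("/jobs/show/review_session.playlist")
#     if context is not None:
#         versions = context.collectVersions()   # Version objects for the playlist
#         entities = context.collectEntities()   # cached Entity objects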
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| gpl-3.0 | -637,507,753,314,924,400 | 32.677419 | 99 | 0.53503 | false |
wolfier/incubator-airflow | tests/executors/test_executor.py | 15 | 2125 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.executors.base_executor import BaseExecutor
from airflow.utils.state import State
from airflow import settings
class TestExecutor(BaseExecutor):
"""
TestExecutor is used for unit testing purposes.
"""
def __init__(self, do_update=False, *args, **kwargs):
self.do_update = do_update
self._running = []
self.history = []
super(TestExecutor, self).__init__(*args, **kwargs)
def execute_async(self, key, command, queue=None):
self.log.debug("{} running task instances".format(len(self.running)))
self.log.debug("{} in queue".format(len(self.queued_tasks)))
def heartbeat(self):
session = settings.Session()
if self.do_update:
self.history.append(list(self.queued_tasks.values()))
while len(self._running) > 0:
ti = self._running.pop()
ti.set_state(State.SUCCESS, session)
for key, val in list(self.queued_tasks.items()):
(command, priority, queue, ti) = val
ti.set_state(State.RUNNING, session)
self._running.append(ti)
self.queued_tasks.pop(key)
session.commit()
session.close()
def terminate(self):
pass
def end(self):
self.sync()
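# Schematic flow (editor's note): with do_update=True each heartbeat() first
# marks every previously running task instance SUCCESS, then moves everything
# currently queued to RUNNING, e.g.
#
#     executor = TestExecutor(do_update=True)
#     executor.queue_command(ti, command)   # queue_command inherited from BaseExecutor
#     executor.heartbeat()                  # ti -> RUNNING
#     executor.heartbeat()                  # ti -> SUCCESS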
| apache-2.0 | -7,909,010,839,748,441,000 | 33.836066 | 77 | 0.653176 | false |
polarise/BioClasses | TransitionMatrix.py | 1 | 8463 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import division
import sys
import math
import itertools
import cPickle
from Bio import SeqIO
class TransitionMatrix( object ):
def __init__( self ):
"""
Constructor
"""
self.transition_counts = dict()
self.initial_counts = dict()
self.transition_probabilities = dict()
self.initial_probabilities = dict()
#*****************************************************************************
def __repr__( self ):
data = ""
keys = self.transition_probabilities.keys()
keys.sort()
data += " " + " ".join( keys ) + "\n"
for codon in keys:
row = [ codon ]
for next_codon in keys:
row.append( "%.3f" % self.transition_probabilities[codon][next_codon] )
data += " ".join( row ) + "\n"
return data
#*****************************************************************************
def row_sums( self ):
keys = self.transition_probabilities.keys()
keys.sort()
for k in keys:
print k, sum( self.transition_probabilities[k].values() )
#*****************************************************************************
def col_sums( self ):
keys = self.transition_probabilities.keys()
keys.sort()
for k in keys:
colsum = 0
for l in keys:
colsum += self.transition_probabilities[l][k]
print k, colsum
#*****************************************************************************
def all_sum( self ):
keys = self.transition_probabilities.keys()
keys.sort()
sum = 0
for k in keys:
for l in keys:
sum += self.transition_probabilities[k][l]
print sum
#*****************************************************************************
def build( self, fastafile ):
# count the transitions
self.transition_counts = dict()
self.initial_counts = dict()
for seq_record in SeqIO.parse( fastafile, "fasta" ):
sequence = str( seq_record.seq )
if sequence[:3] == "Seq":
print >> sys.stderr, "Warning: for %s sequence is unavailable..." % \
seq_record.id
continue
i = 0
while i <= len( sequence ) - 6:
codon = sequence[i:i+3]
next_codon = sequence[i+3:i+6]
if codon.find( "N" ) >= 0 or next_codon.find( "N" ) >= 0 or \
codon.find( "R" ) >= 0 or next_codon.find( "R" ) >= 0: # we have 'N' or 'R' (?)
print >> sys.stderr, "Warning: found 'N'... truncating CDS."
break # break because they will mess the reading frame!!! :-(
# initial counts
if i == 0:
if codon not in self.initial_counts:
self.initial_counts[codon] = 1
else:
self.initial_counts[codon] += 1
if codon not in self.transition_counts:
self.transition_counts[codon] = dict()
if next_codon not in self.transition_counts[codon]:
self.transition_counts[codon][next_codon] = 1
else:
self.transition_counts[codon][next_codon] += 1
i += 3
# there are some empty transitions, e.g. Stop->Ci
# we fill these with low but non-zero counts
# add a pseudocount of 1 to everything
itercodons = itertools.product( "ACGT", repeat=3 )
all_codons = [ "".join( codon ) for codon in itercodons ]
for C1 in all_codons:
try:
a_codon = self.transition_counts[C1] # C1 is present
for C2 in all_codons:
try:
self.transition_counts[C1][C2] += 1 # C1 and C2 are present
except KeyError:
self.transition_counts[C1][C2] = 1 # C1 present; C2 absent
except KeyError:
self.transition_counts[C1] = dict() # neither C1 nor C2 are present
for C2 in all_codons:
self.transition_counts[C1][C2] = 1
# do the same for initial counts
for C1 in all_codons:
try:
self.initial_counts[C1] += 1
except KeyError:
self.initial_counts[C1] = 1
# normalise the counts into probabilities
self.transition_probabilities = dict()
for codon in self.transition_counts:
self.transition_probabilities[codon] = dict()
total = sum( self.transition_counts[codon].values() )
for next_codon in self.transition_counts[codon]:
self.transition_probabilities[codon][next_codon] = \
self.transition_counts[codon][next_codon]/total
self.initial_probabilities = dict()
total = sum( self.initial_counts.values())
for codon in self.initial_counts:
self.initial_probabilities[codon] = self.initial_counts[codon]/total
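# Usage sketch (editor's addition; the file names below are placeholders):
#
#     tm = TransitionMatrix()
#     tm.build("coding_sequences.fasta")   # codon transition + initial counts
#     tm.probability("ATG", "GCT")         # log P(GCT | ATG) by default
#     tm.likelihood("ATGGCTGCTTAA")        # summed log-likelihood of a CDS
#     tm.write("matrix.pkl"); tm.read("matrix.pkl")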
#*****************************************************************************
def probability( self, C1, C2, loglik=True, logbase=math.exp( 1 ) ): # probability of C2 given C1 i.e. C1->C2
if loglik:
return math.log( self.transition_probabilities[C1][C2], logbase )
else:
return self.transition_probabilities[C1][C2]
#*****************************************************************************
def likelihood( self, sequence, loglik=True, initial=False ): # sequence likelihood
if loglik:
loglikelihood = 0
if len( sequence ) >= 3 and initial:
loglikelihood += math.log( self.initial_probabilities[sequence[:3]] )
i = 3
while i <= len( sequence ) - 6:
codon = sequence[i:i+3]
next_codon = sequence[i+3:i+6]
loglikelihood += self.probability( codon, next_codon, loglik )
i += 3
return loglikelihood
else: # potential problem naming a variable like the function!!!
likelihood = 1
if len( sequence ) >= 3 and initial:
likelihood *= self.initial_probabilities[sequence[:3]]
i = 3
while i <= len( sequence ) - 6:
codon = sequence[i:i+3]
next_codon = sequence[i+3:i+6]
likelihood *= self.probability( codon, next_codon, loglik )
i += 3
return likelihood
#*****************************************************************************
def graded_likelihood( self, sequence, loglik=True, initial=False ):
# graded - cumulative likelihood across sequence
if loglik:
graded_loglikelihood = list()
loglikelihood = 0
if len( sequence ) >= 3 and initial:
loglikelihood = math.log( self.initial_probabilities[sequence[:3]] )
graded_loglikelihood.append( loglikelihood )
i = 3
while i <= len( sequence ) - 6:
codon = sequence[i:i+3]
next_codon = sequence[i+3:i+6]
loglikelihood += self.probability( codon, next_codon, loglik )
graded_loglikelihood.append( loglikelihood )
i += 3
return graded_loglikelihood
else: # potential problem naming a variable like the function!!!
graded_likelihood = list()
likelihood = 1
if len( sequence ) >= 3 and initial:
likelihood = self.initial_probabilities[sequence[:3]]
graded_likelihood.append( likelihood )
i = 3
while i <= len( sequence ) - 6:
codon = sequence[i:i+3]
next_codon = sequence[i+3:i+6]
likelihood *= self.probability( codon, next_codon, loglik ) # raw probabilities accumulate as a product, not a sum
graded_likelihood.append( likelihood )
i += 3
return graded_likelihood
#*****************************************************************************
def differential_graded_likelihood( self, sequence, loglik=True, initial=False ):
# minus uniform random sequence
if loglik:
diff_graded_loglikelihood = list()
graded_loglikelihood = self.graded_likelihood( sequence, loglik, \
initial=initial )
diff_graded_loglikelihood = [ graded_loglikelihood[i] + \
( i + 1 )*math.log( 64 ) for i in xrange( len( graded_loglikelihood ))]
return diff_graded_loglikelihood
else: # potential problem naming a variable like the function!!!
diff_graded_likelihood = list()
graded_likelihood = self.graded_likelihood( sequence, initial=initial )
diff_graded_likelihood = [ 64**( i + 1 )*graded_likelihood[i] for i in xrange( len( graded_likelihood ))]
return diff_graded_likelihood
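# Editor's note: the "(i + 1)*log(64)" term is the negated log-likelihood of a
# uniform random codon model (each of the 64 codons has probability 1/64), so
# positive differential values indicate the sequence fits the learned codon
# transition model better than random.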
#*****************************************************************************
def likelihood_slope( self, dgl ):
"""
dgl = a differential gradient likelihood list
"""
if len( dgl ) == 0:
return None
elif len( dgl ) == 1:
return 0
else:
return ( dgl[-1] - dgl[0] )/len( dgl )
#*****************************************************************************
def write( self, outfile ):
data = self.transition_counts, self.transition_probabilities, \
self.initial_counts, self.initial_probabilities
with open( outfile, 'w' ) as f:
cPickle.dump( data, f, cPickle.HIGHEST_PROTOCOL )
#*****************************************************************************
def read( self, infile ):
with open( infile ) as f:
data = cPickle.load( f )
self.transition_counts, self.transition_probabilities, \
self.initial_counts, self.initial_probabilities = data | gpl-2.0 | -8,355,798,465,676,707,000 | 32.322835 | 110 | 0.581945 | false |
tknorris/script.trakt | syncMovies.py | 1 | 17880 | # -*- coding: utf-8 -*-
import copy
import utilities
import kodiUtilities
import logging
from kodiUtilities import notification
logger = logging.getLogger(__name__)
class SyncMovies():
def __init__(self, sync, progress):
self.sync = sync
if not self.sync.show_progress and sync.sync_on_update and sync.notify and self.sync.notify_during_playback:
notification('%s %s' % (kodiUtilities.getString(32045), kodiUtilities.getString(32046)), kodiUtilities.getString(32061)) # Sync started
if sync.show_progress and not sync.run_silent:
progress.create("%s %s" % (kodiUtilities.getString(32045), kodiUtilities.getString(32046)), line1=" ", line2=" ", line3=" ")
kodiMovies = self.__kodiLoadMovies()
if not isinstance(kodiMovies, list) and not kodiMovies:
logger.debug("[Movies Sync] Kodi movie list is empty, aborting movie Sync.")
if sync.show_progress and not sync.run_silent:
progress.close()
return
try:
traktMovies = self.__traktLoadMovies()
except Exception:
logger.debug("[Movies Sync] Error getting Trakt.tv movie list, aborting movie Sync.")
if sync.show_progress and not sync.run_silent:
progress.close()
return
traktMoviesProgress = self.__traktLoadMoviesPlaybackProgress(25, 36)
self.__addMoviesToTraktCollection(kodiMovies, traktMovies, 37, 47)
self.__deleteMoviesFromTraktCollection(traktMovies, kodiMovies, 48, 58)
self.__addMoviesToTraktWatched(kodiMovies, traktMovies, 59, 69)
self.__addMoviesToKodiWatched(traktMovies, kodiMovies, 70, 80)
self.__addMovieProgressToKodi(traktMoviesProgress, kodiMovies, 81, 91)
self.__syncMovieRatings(traktMovies, kodiMovies, 92, 99)
if sync.show_progress and not sync.run_silent:
self.sync.UpdateProgress(100, line1=kodiUtilities.getString(32066), line2=" ", line3=" ")
progress.close()
if not sync.show_progress and sync.sync_on_update and sync.notify and sync.notify_during_playback:
notification('%s %s' % (kodiUtilities.getString(32045), kodiUtilities.getString(32046)), kodiUtilities.getString(32062)) # Sync complete
logger.debug("[Movies Sync] Movies on Trakt.tv (%d), movies in Kodi (%d)." % (len(traktMovies), len(kodiMovies)))
logger.debug("[Movies Sync] Complete.")
def __kodiLoadMovies(self):
self.sync.UpdateProgress(1, line2=kodiUtilities.getString(32079))
logger.debug("[Movies Sync] Getting movie data from Kodi")
data = kodiUtilities.kodiJsonRequest({'jsonrpc': '2.0', 'id': 0, 'method': 'VideoLibrary.GetMovies', 'params': {'properties': ['title', 'imdbnumber', 'year', 'playcount', 'lastplayed', 'file', 'dateadded', 'runtime', 'userrating']}})
if data['limits']['total'] == 0:
logger.debug("[Movies Sync] Kodi JSON request was empty.")
return
kodi_movies = kodiUtilities.kodiRpcToTraktMediaObjects(data)
self.sync.UpdateProgress(10, line2=kodiUtilities.getString(32080))
return kodi_movies
def __traktLoadMovies(self):
self.sync.UpdateProgress(10, line1=kodiUtilities.getString(32079), line2=kodiUtilities.getString(32081))
logger.debug("[Movies Sync] Getting movie collection from Trakt.tv")
traktMovies = {}
traktMovies = self.sync.traktapi.getMoviesCollected(traktMovies)
self.sync.UpdateProgress(17, line2=kodiUtilities.getString(32082))
traktMovies = self.sync.traktapi.getMoviesWatched(traktMovies)
traktMovies = self.sync.traktapi.getMoviesRated(traktMovies)
traktMovies = traktMovies.items()
self.sync.UpdateProgress(24, line2=kodiUtilities.getString(32083))
movies = []
for _, movie in traktMovies:
movie = movie.to_dict()
movies.append(movie)
return movies
def __traktLoadMoviesPlaybackProgress(self, fromPercent, toPercent):
if kodiUtilities.getSettingAsBool('trakt_movie_playback') and not self.sync.IsCanceled():
self.sync.UpdateProgress(fromPercent, line2=kodiUtilities.getString(32122))
logger.debug('[Movies Sync] Getting playback progress from Trakt.tv')
try:
traktProgressMovies = self.sync.traktapi.getMoviePlaybackProgress()
except Exception:
logger.debug("[Movies Sync] Invalid Trakt.tv playback progress list, possible error getting data from Trakt, aborting Trakt.tv playback update.")
return False
i = 0
x = float(len(traktProgressMovies))
moviesProgress = {'movies': []}
for movie in traktProgressMovies:
i += 1
y = ((i / x) * (toPercent-fromPercent)) + fromPercent
self.sync.UpdateProgress(int(y), line2=kodiUtilities.getString(32123) % (i, x))
# will keep the data in python structures - just like the KODI response
movie = movie.to_dict()
moviesProgress['movies'].append(movie)
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32124))
return moviesProgress
def __addMoviesToTraktCollection(self, kodiMovies, traktMovies, fromPercent, toPercent):
if kodiUtilities.getSettingAsBool('add_movies_to_trakt') and not self.sync.IsCanceled():
addTraktMovies = copy.deepcopy(traktMovies)
addKodiMovies = copy.deepcopy(kodiMovies)
traktMoviesToAdd = utilities.compareMovies(addKodiMovies, addTraktMovies)
utilities.sanitizeMovies(traktMoviesToAdd)
logger.debug("[Movies Sync] Compared movies, found %s to add." % len(traktMoviesToAdd))
if len(traktMoviesToAdd) == 0:
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32084))
logger.debug("[Movies Sync] Trakt.tv movie collection is up to date.")
return
titles = ", ".join(["%s" % (m['title']) for m in traktMoviesToAdd])
logger.debug("[Movies Sync] %i movie(s) will be added to Trakt.tv collection." % len(traktMoviesToAdd))
logger.debug("[Movies Sync] Movies to add : %s" % titles)
self.sync.UpdateProgress(fromPercent, line2=kodiUtilities.getString(32063) % len(traktMoviesToAdd))
moviesToAdd = {'movies': traktMoviesToAdd}
# logger.debug("Movies to add: %s" % moviesToAdd)
try:
self.sync.traktapi.addToCollection(moviesToAdd)
except Exception as ex:
message = utilities.createError(ex)
logging.fatal(message)
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32085) % len(traktMoviesToAdd))
def __deleteMoviesFromTraktCollection(self, traktMovies, kodiMovies, fromPercent, toPercent):
if kodiUtilities.getSettingAsBool('clean_trakt_movies') and not self.sync.IsCanceled():
removeTraktMovies = copy.deepcopy(traktMovies)
removeKodiMovies = copy.deepcopy(kodiMovies)
logger.debug("[Movies Sync] Starting to remove.")
traktMoviesToRemove = utilities.compareMovies(removeTraktMovies, removeKodiMovies)
utilities.sanitizeMovies(traktMoviesToRemove)
logger.debug("[Movies Sync] Compared movies, found %s to remove." % len(traktMoviesToRemove))
if len(traktMoviesToRemove) == 0:
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32091))
logger.debug("[Movies Sync] Trakt.tv movie collection is clean, no movies to remove.")
return
titles = ", ".join(["%s" % (m['title']) for m in traktMoviesToRemove])
logger.debug("[Movies Sync] %i movie(s) will be removed from Trakt.tv collection." % len(traktMoviesToRemove))
logger.debug("[Movies Sync] Movies removed: %s" % titles)
self.sync.UpdateProgress(fromPercent, line2=kodiUtilities.getString(32076) % len(traktMoviesToRemove))
moviesToRemove = {'movies': traktMoviesToRemove}
try:
self.sync.traktapi.removeFromCollection(moviesToRemove)
except Exception as ex:
message = utilities.createError(ex)
logging.fatal(message)
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32092) % len(traktMoviesToRemove))
def __addMoviesToTraktWatched(self, kodiMovies, traktMovies, fromPercent, toPercent):
if kodiUtilities.getSettingAsBool('trakt_movie_playcount') and not self.sync.IsCanceled():
updateTraktTraktMovies = copy.deepcopy(traktMovies)
updateTraktKodiMovies = copy.deepcopy(kodiMovies)
traktMoviesToUpdate = utilities.compareMovies(updateTraktKodiMovies, updateTraktTraktMovies, watched=True)
utilities.sanitizeMovies(traktMoviesToUpdate)
if len(traktMoviesToUpdate) == 0:
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32086))
logger.debug("[Movies Sync] Trakt.tv movie playcount is up to date")
return
titles = ", ".join(["%s" % (m['title']) for m in traktMoviesToUpdate])
logger.debug("[Movies Sync] %i movie(s) playcount will be updated on Trakt.tv" % len(traktMoviesToUpdate))
logger.debug("[Movies Sync] Movies updated: %s" % titles)
self.sync.UpdateProgress(fromPercent, line2=kodiUtilities.getString(32064) % len(traktMoviesToUpdate))
# Send request to update playcounts on Trakt.tv
chunksize = 200
chunked_movies = utilities.chunks([movie for movie in traktMoviesToUpdate], chunksize)
errorcount = 0
i = 0
x = float(len(traktMoviesToUpdate))
for chunk in chunked_movies:
if self.sync.IsCanceled():
return
i += 1
y = ((i / x) * (toPercent-fromPercent)) + fromPercent
self.sync.UpdateProgress(int(y), line2=kodiUtilities.getString(32093) % ((i) * chunksize if (i) * chunksize < x else x, x))
params = {'movies': chunk}
# logger.debug("moviechunk: %s" % params)
try:
self.sync.traktapi.addToHistory(params)
except Exception as ex:
message = utilities.createError(ex)
logging.fatal(message)
errorcount += 1
logger.debug("[Movies Sync] Movies updated: %d error(s)" % errorcount)
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32087) % len(traktMoviesToUpdate))
def __addMoviesToKodiWatched(self, traktMovies, kodiMovies, fromPercent, toPercent):
if kodiUtilities.getSettingAsBool('kodi_movie_playcount') and not self.sync.IsCanceled():
updateKodiTraktMovies = copy.deepcopy(traktMovies)
updateKodiKodiMovies = copy.deepcopy(kodiMovies)
kodiMoviesToUpdate = utilities.compareMovies(updateKodiTraktMovies, updateKodiKodiMovies, watched=True, restrict=True)
if len(kodiMoviesToUpdate) == 0:
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32088))
logger.debug("[Movies Sync] Kodi movie playcount is up to date.")
return
titles = ", ".join(["%s" % (m['title']) for m in kodiMoviesToUpdate])
logger.debug("[Movies Sync] %i movie(s) playcount will be updated in Kodi" % len(kodiMoviesToUpdate))
logger.debug("[Movies Sync] Movies to add: %s" % titles)
self.sync.UpdateProgress(fromPercent, line2=kodiUtilities.getString(32065) % len(kodiMoviesToUpdate))
# split movie list into chunks of 50
chunksize = 50
chunked_movies = utilities.chunks([{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid": kodiMoviesToUpdate[i]['movieid'], "playcount": kodiMoviesToUpdate[i]['plays'], "lastplayed": utilities.convertUtcToDateTime(kodiMoviesToUpdate[i]['last_watched_at'])}, "id": i} for i in range(len(kodiMoviesToUpdate))], chunksize)
i = 0
x = float(len(kodiMoviesToUpdate))
for chunk in chunked_movies:
if self.sync.IsCanceled():
return
i += 1
y = ((i / x) * (toPercent-fromPercent)) + fromPercent
self.sync.UpdateProgress(int(y), line2=kodiUtilities.getString(32089) % ((i) * chunksize if (i) * chunksize < x else x, x))
kodiUtilities.kodiJsonRequest(chunk)
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32090) % len(kodiMoviesToUpdate))
def __addMovieProgressToKodi(self, traktMovies, kodiMovies, fromPercent, toPercent):
if kodiUtilities.getSettingAsBool('trakt_movie_playback') and traktMovies and not self.sync.IsCanceled():
updateKodiTraktMovies = copy.deepcopy(traktMovies)
updateKodiKodiMovies = copy.deepcopy(kodiMovies)
kodiMoviesToUpdate = utilities.compareMovies(updateKodiTraktMovies['movies'], updateKodiKodiMovies, restrict=True, playback=True)
if len(kodiMoviesToUpdate) == 0:
self.sync.UpdateProgress(toPercent, line1='', line2=kodiUtilities.getString(32125))
logger.debug("[Movies Sync] Kodi movie playbacks are up to date.")
return
logger.debug("[Movies Sync] %i movie(s) playbacks will be updated in Kodi" % len(kodiMoviesToUpdate))
self.sync.UpdateProgress(fromPercent, line1='', line2=kodiUtilities.getString(32126) % len(kodiMoviesToUpdate))
# need to calculate the progress in int from progress in percent from Trakt
# split movie list into chunks of 50
chunksize = 50
chunked_movies = utilities.chunks([{"jsonrpc": "2.0", "id": i, "method": "VideoLibrary.SetMovieDetails", "params": {"movieid": kodiMoviesToUpdate[i]['movieid'], "resume": {"position": kodiMoviesToUpdate[i]['runtime'] / 100.0 * kodiMoviesToUpdate[i]['progress'], "total": kodiMoviesToUpdate[i]['runtime']}}} for i in range(len(kodiMoviesToUpdate))], chunksize)
i = 0
x = float(len(kodiMoviesToUpdate))
for chunk in chunked_movies:
if self.sync.IsCanceled():
return
i += 1
y = ((i / x) * (toPercent-fromPercent)) + fromPercent
self.sync.UpdateProgress(int(y), line2=kodiUtilities.getString(32127) % ((i) * chunksize if (i) * chunksize < x else x, x))
kodiUtilities.kodiJsonRequest(chunk)
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32128) % len(kodiMoviesToUpdate))
def __syncMovieRatings(self, traktMovies, kodiMovies, fromPercent, toPercent):
if kodiUtilities.getSettingAsBool('trakt_sync_ratings') and traktMovies and not self.sync.IsCanceled():
updateKodiTraktMovies = copy.deepcopy(traktMovies)
updateKodiKodiMovies = copy.deepcopy(kodiMovies)
traktMoviesToUpdate = utilities.compareMovies(updateKodiKodiMovies, updateKodiTraktMovies, rating=True)
if len(traktMoviesToUpdate) == 0:
self.sync.UpdateProgress(toPercent, line1='', line2=kodiUtilities.getString(32179))
logger.debug("[Movies Sync] Trakt movie ratings are up to date.")
else:
logger.debug("[Movies Sync] %i movie(s) ratings will be updated on Trakt" % len(traktMoviesToUpdate))
self.sync.UpdateProgress(fromPercent, line1='', line2=kodiUtilities.getString(32180) % len(traktMoviesToUpdate))
moviesRatings = {'movies': traktMoviesToUpdate}
self.sync.traktapi.addRating(moviesRatings)
kodiMoviesToUpdate = utilities.compareMovies(updateKodiTraktMovies, updateKodiKodiMovies, restrict=True, rating=True)
if len(kodiMoviesToUpdate) == 0:
self.sync.UpdateProgress(toPercent, line1='', line2=kodiUtilities.getString(32169))
logger.debug("[Movies Sync] Kodi movie ratings are up to date.")
else:
logger.debug("[Movies Sync] %i movie(s) ratings will be updated in Kodi" % len(kodiMoviesToUpdate))
self.sync.UpdateProgress(fromPercent, line1='', line2=kodiUtilities.getString(32170) % len(kodiMoviesToUpdate))
# split movie list into chunks of 50
chunksize = 50
chunked_movies = utilities.chunks([{"jsonrpc": "2.0", "id": i, "method": "VideoLibrary.SetMovieDetails",
"params": {"movieid": kodiMoviesToUpdate[i]['movieid'],
"userrating": kodiMoviesToUpdate[i]['rating']}} for i in range(len(kodiMoviesToUpdate))],
chunksize)
i = 0
x = float(len(kodiMoviesToUpdate))
for chunk in chunked_movies:
if self.sync.IsCanceled():
return
i += 1
y = ((i / x) * (toPercent-fromPercent)) + fromPercent
self.sync.UpdateProgress(int(y), line2=kodiUtilities.getString(32171) % ((i) * chunksize if (i) * chunksize < x else x, x))
kodiUtilities.kodiJsonRequest(chunk)
self.sync.UpdateProgress(toPercent, line2=kodiUtilities.getString(32172) % len(kodiMoviesToUpdate))
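# Typical invocation (editor's sketch): the surrounding add-on builds a sync
# object exposing traktapi, the show_progress/run_silent/notify flags and
# UpdateProgress()/IsCanceled(), plus a Kodi progress dialog, then simply runs
#
#     SyncMovies(sync, progress)   # the constructor performs the whole sync pass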
| gpl-2.0 | 6,765,245,835,865,363,000 | 51.899408 | 371 | 0.635515 | false |
weigj/django-multidb | tests/modeltests/get_or_create/models.py | 17 | 2572 | """
33. get_or_create()
``get_or_create()`` does what it says: it tries to look up an object with the
given parameters. If an object isn't found, it creates one with the given
parameters.
"""
from django.db import models, IntegrityError
class Person(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
class ManualPrimaryKeyTest(models.Model):
id = models.IntegerField(primary_key=True)
data = models.CharField(max_length=100)
__test__ = {'API_TESTS':"""
# Acting as a divine being, create an Person.
>>> from datetime import date
>>> p = Person(first_name='John', last_name='Lennon', birthday=date(1940, 10, 9))
>>> p.save()
# Only one Person is in the database at this point.
>>> Person.objects.count()
1
# get_or_create() a person with similar first names.
>>> p, created = Person.objects.get_or_create(first_name='John', last_name='Lennon', defaults={'birthday': date(1940, 10, 9)})
# get_or_create() didn't have to create an object.
>>> created
False
# There's still only one Person in the database.
>>> Person.objects.count()
1
# get_or_create() a Person with a different name.
>>> p, created = Person.objects.get_or_create(first_name='George', last_name='Harrison', defaults={'birthday': date(1943, 2, 25)})
>>> created
True
>>> Person.objects.count()
2
# If we execute the exact same statement, it won't create a Person.
>>> p, created = Person.objects.get_or_create(first_name='George', last_name='Harrison', defaults={'birthday': date(1943, 2, 25)})
>>> created
False
>>> Person.objects.count()
2
# If you don't specify a value or default value for all required fields, you
# will get an error.
>>> try:
... p, created = Person.objects.get_or_create(first_name='Tom', last_name='Smith')
... except Exception, e:
... if isinstance(e, IntegrityError):
... print "Pass"
... else:
... print "Fail with %s" % type(e)
Pass
# If you specify an existing primary key, but different other fields, then you
# will get an error and data will not be updated.
>>> m = ManualPrimaryKeyTest(id=1, data='Original')
>>> m.save()
>>> try:
... m, created = ManualPrimaryKeyTest.objects.get_or_create(id=1, data='Different')
... except Exception, e:
... if isinstance(e, IntegrityError):
... print "Pass"
... else:
... print "Fail with %s" % type(e)
Pass
>>> ManualPrimaryKeyTest.objects.get(id=1).data == 'Original'
True
"""}
| bsd-3-clause | 5,109,461,865,224,321,000 | 29.987952 | 130 | 0.670684 | false |
grahamhayes/designate | designate/tests/test_coordination.py | 1 | 10303 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from designate import coordination
from designate import service
from designate.tests import fixtures
from designate.tests import TestCase
cfg.CONF.register_group(cfg.OptGroup("service:dummy"))
cfg.CONF.register_opts([
], group="service:dummy")
class CoordinatedService(coordination.CoordinationMixin, service.Service):
@property
def service_name(self):
return "dummy"
class TestCoordinationMixin(TestCase):
def setUp(self):
super(TestCoordinationMixin, self).setUp()
self.config(backend_url="zake://", group="coordination")
def test_start(self):
service = CoordinatedService()
service.start()
self.assertTrue(service._coordination_started)
self.assertIn(service.service_name.encode('utf-8'),
service._coordinator.get_groups().get())
self.assertIn(service._coordination_id.encode('utf-8'),
service._coordinator.get_members(
service.service_name).get())
service.stop()
def test_stop(self):
service = CoordinatedService()
service.start()
service.stop()
self.assertFalse(service._coordination_started)
def test_start_no_coordination(self):
self.config(backend_url=None, group="coordination")
service = CoordinatedService()
service.start()
self.assertIsNone(service._coordinator)
def test_stop_no_coordination(self):
self.config(backend_url=None, group="coordination")
service = CoordinatedService()
self.assertIsNone(service._coordinator)
service.start()
service.stop()
class TestPartitioner(TestCase):
def _get_partitioner(self, partitions, host=b'a'):
fixture = self.useFixture(fixtures.CoordinatorFixture(
'zake://', host))
group = 'group'
fixture.coordinator.create_group(group)
fixture.coordinator.join_group(group)
return coordination.Partitioner(fixture.coordinator, group, host,
partitions), fixture.coordinator
def test_callbacks(self):
cb1 = mock.Mock()
cb2 = mock.Mock()
partitions = list(range(0, 10))
p_one, c_one = self._get_partitioner(partitions)
p_one.start()
p_one.watch_partition_change(cb1)
p_one.watch_partition_change(cb2)
# Initial partitions are calculated upon service bootup
cb1.assert_called_with(partitions, None, None)
cb2.assert_called_with(partitions, None, None)
cb1.reset_mock()
cb2.reset_mock()
# Start up a new partitioner that will cause the callbacks to be called
p_two, c_two = self._get_partitioner(partitions, host=b'b')
p_two.start()
# We'll get the 5 first partition ranges
c_one.run_watchers()
cb1.assert_called_with(partitions[:5], [b'a', b'b'], mock.ANY)
cb2.assert_called_with(partitions[:5], [b'a', b'b'], mock.ANY)
def test_two_even_partitions(self):
partitions = list(range(0, 10))
p_one, c_one = self._get_partitioner(partitions)
p_two, c_two = self._get_partitioner(partitions, host=b'b')
p_one.start()
p_two.start()
# Call c_one watchers, making it refresh its partitions
c_one.run_watchers()
self.assertEqual([0, 1, 2, 3, 4], p_one.my_partitions)
self.assertEqual([5, 6, 7, 8, 9], p_two.my_partitions)
def test_two_odd_partitions(self):
partitions = list(range(0, 11))
p_one, c_one = self._get_partitioner(partitions)
p_two, c_two = self._get_partitioner(partitions, host=b'b')
p_one.start()
p_two.start()
# Call c_one watchers, making it refresh its partitions
c_one.run_watchers()
self.assertEqual([0, 1, 2, 3, 4, 5], p_one.my_partitions)
self.assertEqual([6, 7, 8, 9, 10], p_two.my_partitions)
def test_three_even_partitions(self):
partitions = list(range(0, 10))
p_one, c_one = self._get_partitioner(partitions)
p_two, c_two = self._get_partitioner(partitions, host=b'b')
p_three, c_three = self._get_partitioner(partitions, host=b'c')
p_one.start()
p_two.start()
p_three.start()
# Call c_one watchers, making it refresh its partitions
c_one.run_watchers()
c_two.run_watchers()
self.assertEqual([0, 1, 2, 3], p_one.my_partitions)
self.assertEqual([4, 5, 6, 7], p_two.my_partitions)
self.assertEqual([8, 9], p_three.my_partitions)
def test_three_odd_partitions(self):
partitions = list(range(0, 11))
p_one, c_one = self._get_partitioner(partitions)
p_two, c_two = self._get_partitioner(partitions, host=b'b')
p_three, c_three = self._get_partitioner(partitions, host=b'c')
p_one.start()
p_two.start()
p_three.start()
c_one.run_watchers()
c_two.run_watchers()
self.assertEqual([0, 1, 2, 3], p_one.my_partitions)
self.assertEqual([4, 5, 6, 7], p_two.my_partitions)
self.assertEqual([8, 9, 10], p_three.my_partitions)
class TestPartitionerWithoutBackend(TestCase):
def test_start(self):
# We test starting the partitioner and calling the watch func first
partitions = list(range(0, 10))
cb1 = mock.Mock()
cb2 = mock.Mock()
partitioner = coordination.Partitioner(
None, 'group', 'meme', partitions)
partitioner.watch_partition_change(cb1)
partitioner.watch_partition_change(cb2)
partitioner.start()
cb1.assert_called_with(partitions, None, None)
cb2.assert_called_with(partitions, None, None)
def test_cb_on_watch(self):
partitions = list(range(0, 10))
cb = mock.Mock()
partitioner = coordination.Partitioner(
None, 'group', 'meme', partitions)
partitioner.start()
partitioner.watch_partition_change(cb)
cb.assert_called_with(partitions, None, None)
class TestLeaderElection(TestCase):
def setUp(self):
super(TestLeaderElection, self).setUp()
self.coord_fixture = self.useFixture(fixtures.CoordinatorFixture(
'zake://', b'InsertNameHere'))
self.election = coordination.LeaderElection(
self.coordinator, 'President')
@property
def coordinator(self):
"""Helper for quick access to the raw coordinator"""
return self.coord_fixture.coordinator
def test_is_leader(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Start the election
self.election.start()
self.coordinator.run_watchers()
# We should now be the leader.
self.assertTrue(self.election.is_leader)
# Stop the election
self.election.stop()
# We should no longer be the leader.
self.assertFalse(self.election.is_leader)
def test_callbacks(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Create and attach a callback
mock_callback_one = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_one)
# Ensure the callback has not yet been called.
self.assertFalse(mock_callback_one.called)
# Start the election
self.election.start()
self.coordinator.run_watchers()
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_one.call_count)
# Create and attach a second callback after we start
mock_callback_two = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_two)
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_two.call_count)
class TestLeaderElectionWithoutBackend(TestCase):
def setUp(self):
super(TestLeaderElectionWithoutBackend, self).setUp()
# coordinator = None indicates no coordination backend has been
# configured
coordinator = None
self.election = coordination.LeaderElection(coordinator, 'President')
def test_is_leader(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Start the election
self.election.start()
# We should now be the leader.
self.assertTrue(self.election.is_leader)
# Stop the election
self.election.stop()
# We should no longer be the leader.
self.assertFalse(self.election.is_leader)
def test_callbacks(self):
# We should not be leader until after we start the election.
self.assertFalse(self.election.is_leader)
# Create and attach a callback
mock_callback_one = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_one)
# Ensure the callback has not yet been called.
self.assertFalse(mock_callback_one.called)
# Start the election
self.election.start()
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_one.call_count)
# Create and attach a second callback after we start
mock_callback_two = mock.Mock()
self.election.watch_elected_as_leader(mock_callback_two)
# Ensure the callback has been called exactly once.
self.assertEqual(1, mock_callback_two.call_count)
| apache-2.0 | 4,359,935,634,310,086,000 | 32.343042 | 77 | 0.640396 | false |
mrknow/filmkodi | plugin.video.xbmcfilm/resources/lib/utils/xppod.py | 11 | 2757 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# 2014 - Anonymous
def decode(string):
import base64
s = ''
str = _reverse(string)
for x in range(0,len(str)):
s += _decode_char(str[x])
return base64.b64decode(s)
def _reverse(s):
string = ''
length = len(s)-3
while length > 2:
string += s[length]
length = length - 1
length = len(string)
num2 = int(s[1]+s[0])/2
if num2 < length:
i = num2
while i < length:
if len(string) <= i: return string
if (i+1) < length: string = string[0:i] + string[i+1:]
i += num2
return string
def _decode_char(c):
array1 = ["0", "1", "2", "3", "4", "5", "6", "7", "9", "H", "M", "D", "X", "V", "J", "Q", "U", "G", "E", "T", "N", "o", "v", "y", "w", "k"]
array2 = ["c", "I", "W", "m", "8", "L", "l", "g", "R", "B", "a", "u", "s", "p", "z", "Z", "e", "d", "=", "x", "Y", "t", "n", "f", "b", "i"]
for i in range(0,len(array1)):
if c == array1[i]: return array2[i][0]
if c == array2[i]: return array1[i][0]
return c
def decode_hls(file_url):
def K12K(a, typ='b'):
codec_a = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'W', 'G', 'X', 'M', 'H', 'R', 'U', 'Z', 'I', 'D', '=', 'N', 'Q', 'V', 'B', 'L']
codec_b = ['b', 'z', 'a', 'c', 'l', 'm', 'e', 'p', 's', 'J', 'x', 'd', 'f', 't', 'i', 'o', 'Y', 'k', 'n', 'g', 'r', 'y', 'T', 'w', 'u', 'v']
if 'd' == typ:
tmp = codec_a
codec_a = codec_b
codec_b = tmp
idx = 0
while idx < len(codec_a):
a = a.replace(codec_a[idx], "___")
a = a.replace(codec_b[idx], codec_a[idx])
a = a.replace("___", codec_b[idx])
idx += 1
return a
def _xc13(_arg1):
_lg27 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
_local2 = ""
_local3 = [0, 0, 0, 0]
_local4 = [0, 0, 0]
_local5 = 0
while _local5 < len(_arg1):
_local6 = 0
while _local6 < 4 and (_local5 + _local6) < len(_arg1):
_local3[_local6] = _lg27.find(_arg1[_local5 + _local6])
_local6 += 1
_local4[0] = ((_local3[0] << 2) + ((_local3[1] & 48) >> 4))
_local4[1] = (((_local3[1] & 15) << 4) + ((_local3[2] & 60) >> 2))
_local4[2] = (((_local3[2] & 3) << 6) + _local3[3])
_local7 = 0
while _local7 < len(_local4):
if _local3[_local7 + 1] == 64:
break
_local2 += chr(_local4[_local7])
_local7 += 1
_local5 += 4
return _local2
return _xc13(K12K(file_url, 'e')) | apache-2.0 | 2,917,051,027,489,230,300 | 34.333333 | 148 | 0.404719 | false |
job/exscript | tests/Exscript/emulators/IOSEmulatorTest.py | 6 | 1099 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
from VirtualDeviceTest import VirtualDeviceTest
from Exscript.emulators import IOSEmulator
from Exscript.emulators.IOSEmulator import iosbanner
class IOSEmulatorTest(VirtualDeviceTest):
CORRELATE = IOSEmulator
cls = IOSEmulator
banner = iosbanner % ('myhost', 'myhost', 'myhost')
prompt = 'myhost#'
userprompt = 'Username: '
passwdprompt = 'Password: '
def testAddCommand(self):
VirtualDeviceTest.testAddCommand(self)
cs = self.cls('myhost',
strict = True,
echo = False,
login_type = self.cls.LOGIN_TYPE_NONE)
response = cs.do('show version')
self.assert_(response.startswith('Cisco Internetwork Operating'), response)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(IOSEmulatorTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| gpl-2.0 | 5,588,313,901,772,205,000 | 35.633333 | 84 | 0.613285 | false |
airbnb/airflow | tests/providers/google/cloud/operators/test_mlengine_system.py | 10 | 1883 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from urllib.parse import urlparse
import pytest
from airflow.providers.google.cloud.example_dags.example_mlengine import (
JOB_DIR,
PREDICTION_OUTPUT,
SAVED_MODEL_PATH,
SUMMARY_STAGING,
SUMMARY_TMP,
)
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
BUCKETS = {
urlparse(bucket_url).netloc
for bucket_url in {SAVED_MODEL_PATH, JOB_DIR, PREDICTION_OUTPUT, SUMMARY_TMP, SUMMARY_STAGING}
}
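# For example, urlparse("gs://my-bucket/path/to/file").netloc is "my-bucket",
# so BUCKETS collects the distinct bucket names referenced by the example DAG.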
@pytest.mark.credential_file(GCP_AI_KEY)
class MlEngineExampleDagTest(GoogleSystemTest):
@provide_gcp_context(GCP_AI_KEY)
def setUp(self):
super().setUp()
for bucket in BUCKETS:
self.create_gcs_bucket(bucket)
@provide_gcp_context(GCP_AI_KEY)
def tearDown(self):
for bucket in BUCKETS:
self.delete_gcs_bucket(bucket)
super().tearDown()
@provide_gcp_context(GCP_AI_KEY)
def test_run_example_dag(self):
self.run_dag('example_gcp_mlengine', CLOUD_DAG_FOLDER)
| apache-2.0 | 3,716,020,366,242,964,500 | 33.87037 | 103 | 0.731811 | false |
beschoenen/MusicClone | music_clone.py | 1 | 6438 | import os
import shutil
import re
import json
import subprocess
from mutagen.id3 import ID3
from configparser import ConfigParser
config = ConfigParser()
config.read("config.ini")
main = config.get('basic', 'main_folder')
folder_lossy = config.get('basic', 'lossy_folder')
folder_lossless = config.get('basic', 'lossless_folder')
ffprobe = config.get('binaries', 'ffprobe')
ffmpeg = config.get('binaries', 'ffmpeg')
if main == "" or folder_lossy == "" or folder_lossless == "":
print("Config hasn't been filled out correctly")
exit()
if ffprobe == "":
ffprobe = "/usr/bin/ffprobe"
if ffmpeg == "":
ffmpeg = "/usr/bin/ffmpeg"
lossless_codecs = ["FLAC", "ALAC"]
lossy_codecs = ["MP3", "AAC"]
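# Expected config.ini layout (editor's sketch; all values below are placeholders):
#
#     [basic]
#     main_folder = /path/to/Music
#     lossy_folder = Lossy
#     lossless_folder = Lossless
#
#     [binaries]
#     ffprobe = /usr/bin/ffprobe
#     ffmpeg = /usr/bin/ffmpeg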
def get_folder(lossless):
return os.path.join(main, folder_lossless if lossless else folder_lossy)
def remove_quality(releases):
n_releases = []
for release in releases:
n_releases.append(re.sub(r'\[.+\]', '', release).strip())
return n_releases
def remove_extension(release):
n_songs = []
for song in release:
n_songs.append(os.path.splitext(song)[0])
return n_songs
def lossy_file_by_name(lossless_release, name):
for song in lossless_release:
if song.startswith(name):
return song
def lossless_release_by_name(lossless_artist, lossy_release):
for release in lossless_artist:
if release.startswith(lossy_release):
return release
class list_items:
folder = ""
def __init__(self, lossless):
self.folder = get_folder(lossless)
def artists(self):
artists = []
for artist in os.listdir(self.folder):
artist_folder = os.path.join(self.folder, artist)
if os.path.isdir(artist_folder) and not artist.startswith('.'):
artists.append(artist)
return artists
def releases(self, artist):
releases = []
folder = os.path.join(self.folder, artist)
for release in os.listdir(folder):
release_folder = os.path.join(folder, release)
if os.path.isdir(release_folder) and not release.startswith('.'):
releases.append(release)
return releases
def songs(self, artist, release):
songs = []
folder = os.path.join(self.folder, artist, release)
for song in os.listdir(folder):
song_path = os.path.join(folder, song)
if os.path.splitext(song_path)[1].upper() in [".M4A", ".MP3", ".FLAC"] \
and os.path.isfile(song_path) and not song.startswith('.'):
songs.append(song)
return songs
lossy_class = list_items(False)
lossless_class = list_items(True)
class manage_lossless:
def __init__(self):
self.artists()
def artists(self):
for artist in lossless_class.artists():
if artist not in lossy_class.artists():
os.makedirs(os.path.join(get_folder(False), artist))
self.releases(artist)
def releases(self, artist):
for release in lossless_class.releases(artist):
nq_release = re.sub(r'\[.+\]', '', release).strip()
if nq_release not in lossy_class.releases(artist):
os.makedirs(os.path.join(get_folder(False), artist, nq_release))
self.songs(artist, release)
def songs(self, artist, release):
nq_release = re.sub(r'\[.+\]', '', release).strip()
for song in lossless_class.songs(artist, release):
ne_song = os.path.splitext(song)[0]
lossy_path = os.path.join(get_folder(False), artist, nq_release,
lossy_file_by_name(lossless_class.songs(artist, release), ne_song))
lossless_path = os.path.join(get_folder(True), artist, release, song)
if ne_song not in remove_extension(lossy_class.songs(artist, nq_release)):
if self.get_codec(lossless_path) in lossless_codecs:
self.encode(lossless_path, os.path.join(get_folder(False), artist, nq_release), ne_song)
else:
self.copy_file(lossless_path, lossy_path)
else:
if self.is_modified(lossless_path, lossy_path):
self.copy_tags(lossless_path, lossy_path)
def get_codec(self, path):
json_string = subprocess.check_output(
"{0} -v quiet -print_format json -show_format -show_streams \"{1}\"".format(ffprobe, path),
shell=True)
info = json.loads(json_string.decode('utf-8'))
return info["streams"][0]["codec_name"].upper()
def is_modified(self, lossless_path, lossy_path):
return os.path.getmtime(lossless_path) > os.path.getmtime(lossy_path)
def copy_tags(self, lossless_file, lossy_file):
source = ID3(lossless_file)
dest = ID3(lossy_file)
for key in source:
dest[key] = source[key]
dest.save()
def copy_file(self, lossless_file, lossy_file):
shutil.copy(lossless_file, lossy_file)
def encode(self, lossless_path, lossy_folder, file):
os.system("{0} -loglevel warning -i \"{1}\" -ab 320k -map_metadata 0 \"{2}\"".format(
ffmpeg,
lossless_path,
os.path.join(lossy_folder, file + ".mp3")
))
manage_lossless()
class manage_lossy:
def __init__(self):
self.artists()
def artists(self):
for artist in lossy_class.artists():
artist_folder = os.path.join(get_folder(False), artist)
if artist not in lossless_class.artists():
shutil.rmtree(artist_folder)
else:
self.releases(artist)
def releases(self, artist):
for release in lossy_class.releases(artist):
release_folder = os.path.join(get_folder(False), artist, release)
if release not in remove_quality(lossless_class.releases(artist)):
shutil.rmtree(release_folder)
else:
self.songs(artist, release)
def songs(self, artist, release):
for song in lossy_class.songs(artist, release):
song_path = os.path.join(get_folder(False), artist, release, song)
if os.path.splitext(song)[0] not in \
remove_extension(lossless_class.songs(artist, lossless_release_by_name(lossless_class.releases(artist), release))):
os.remove(song_path)
manage_lossy()
| apache-2.0 | 6,808,435,926,520,776,000 | 33.612903 | 135 | 0.600963 | false |
elhuhdron/emdrp | legacy/neon/transforms/emcost.py | 2 | 3877 | # The MIT License (MIT)
#
# Copyright (c) 2016 Paul Watkins, National Institutes of Health / NINDS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from neon.transforms.cost import Metric
class EMMetric(Metric):
"""
Compute the EM specific metrics
"""
def __init__(self, oshape=None, use_softmax=False):
if not oshape:
oshape = [1,2]
elif len(oshape)==2:
self.oshape = list(oshape)
else:
self.oshape = [oshape[0]*oshape[1], oshape[2]]
self.nclass = self.oshape[1]
self.use_softmax = use_softmax
# self.predictions = self.be.iobuf(1)
# self.targets = self.be.iobuf(1)
self.class_error = self.be.iobuf(1) # Contains per record metric
self.log_prob = self.be.iobuf(1) # Contains per record metric
self.log_name = 'CrossEntropyMulti' if use_softmax else 'CrossEntropyBinary'
self.metric_names = ['ClassificationError', self.log_name]
def __call__(self, y, t, calcrange=slice(0, None)):
"""
Compute the accuracy metric
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
numpy ary : Returns the metrics in numpy array,
[ClassificationError CrossEntropy]
"""
# xxx - really want to do something like this where argmax is only over the classes, but
# neon won't do an argmax with more than 2 dimensions:
# ValueError: Operations that are not simple elementwise are only currently supported in 2 dimensions.
# # calculates mean class error (over nclass) over all output pixels (noutputs)
# self.predictions[:] = self.be.argmax(y.reshape(self.oshape+[-1]), axis=1)
# self.targets[:] = self.be.argmax(t.reshape(self.oshape+[-1]), axis=1)
# self.class_error[:] = self.be.not_equal(self.predictions, self.targets).mean(axis=0)
# instead just sum all correct or not-correct as if all outputs were completely independent
#self.class_accuracy[:] = self.be.mean((y > 0.5) * t + (y <= 0.5) * (1 - t), axis=0)
self.class_error[:] = self.be.mean((y <= 0.5) * t + (y > 0.5) * (1 - t), axis=0)
# calculates CrossEntropy (Multi or Binary depending on use_softmax) summed over all outputs
log_tgt = - self.be.safelog(y) * t
if self.use_softmax:
self.log_prob[:] = self.be.sum(log_tgt, axis=0)
else:
self.log_prob[:] = self.be.sum(log_tgt - self.be.safelog(1 - y) * (1 - t), axis=0)
return np.array((self.class_error.get()[:, calcrange].mean(),
self.log_prob.get()[:, calcrange].mean()))
| mit | 5,693,443,460,563,538,000 | 44.623529 | 110 | 0.637091 | false |
minimalparts/PeARS | app/posTagger.py | 1 | 1381 | import textblob_aptagger
from textblob import TextBlob, Word
#########################################
# Start POS tagger
#########################################
pt = textblob_aptagger.PerceptronTagger()
#######################################
# Tag query
#######################################
def tagQuery(query):
taggedquery = ""
try:
tags = pt.tag(query)
if len(tags) > 0:
for word in tags:
surface = word[0]
pos = word[1]
# print word
try:
if pos[0] == 'N' or pos[0] == 'V':
tag = Word(surface).lemmatize(
pos[0].lower()) + "_" + pos[0]
else:
if pos[0] == 'J':
# Hack -- convert pos J to pos A because that's how
# adjectives are represented in dm file
tag = Word(surface).lemmatize().lower() + "_A"
else:
tag = Word(surface).lemmatize(
).lower() + "_" + pos[0]
taggedquery = taggedquery + tag + " "
except:
taggedquery = taggedquery + surface + "_" + pos[0] + " "
except:
print "ERROR processing query", query
return taggedquery
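# Example (editor's illustration): nouns and verbs are lemmatised and suffixed
# with their POS initial, adjectives are mapped to "_A" to match the dm file,
# so a query such as "red cars" would typically come back as "red_A car_N ".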
| mit | -5,797,652,619,619,997,000 | 33.525 | 79 | 0.375091 | false |
twobob/buildroot-kindle | output/build/host-python-2.7.2/Tools/scripts/texi2html.py | 46 | 69824 | #! /usr/bin/env python
# Convert GNU texinfo files into HTML, one file per node.
# Based on Texinfo 2.14.
# Usage: texi2html [-d] [-d] [-c] inputfile outputdirectory
# The input file must be a complete texinfo file, e.g. emacs.texi.
# This creates many files (one per info node) in the output directory,
# overwriting existing files of the same name. All files created have
# ".html" as their extension.
# XXX To do:
# - handle @comment*** correctly
# - handle @xref {some words} correctly
# - handle @ftable correctly (items aren't indexed?)
# - handle @itemx properly
# - handle @exdent properly
# - add links directly to the proper line from indices
# - check against the definitive list of @-cmds; we still miss (among others):
# - @defindex (hard)
# - @c(omment) in the middle of a line (rarely used)
# - @this* (not really needed, only used in headers anyway)
# - @today{} (ever used outside title page?)
# More consistent handling of chapters/sections/etc.
# Lots of documentation
# Many more options:
# -top designate top node
# -links customize which types of links are included
# -split split at chapters or sections instead of nodes
# -name Allow different types of filename handling. Non unix systems
# will have problems with long node names
# ...
# Support the most recent texinfo version and take a good look at HTML 3.0
# More debugging output (customizable) and more flexible error handling
# How about icons ?
# rpyron 2002-05-07
# Robert Pyron <[email protected]>
# 1. BUGFIX: In function makefile(), strip blanks from the nodename.
# This is necessary to match the behavior of parser.makeref() and
# parser.do_node().
# 2. BUGFIX fixed KeyError in end_ifset (well, I may have just made
# it go away, rather than fix it)
# 3. BUGFIX allow @menu and menu items inside @ifset or @ifclear
# 4. Support added for:
# @uref URL reference
# @image image file reference (see note below)
# @multitable output an HTML table
# @vtable
# 5. Partial support for accents, to match MAKEINFO output
# 6. I added a new command-line option, '-H basename', to specify
# HTML Help output. This will cause three files to be created
# in the current directory:
# `basename`.hhp HTML Help Workshop project file
# `basename`.hhc Contents file for the project
# `basename`.hhk Index file for the project
# When fed into HTML Help Workshop, the resulting file will be
# named `basename`.chm.
# 7. A new class, HTMLHelp, to accomplish item 6.
# 8. Various calls to HTMLHelp functions.
# A NOTE ON IMAGES: Just as 'outputdirectory' must exist before
# running this program, all referenced images must already exist
# in outputdirectory.
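#
# Example invocation (hypothetical paths; as noted above, the output
# directory must already exist and receives one <node>.html file per node):
#
#   mkdir emacs-html
#   python texi2html.py emacs.texi emacs-html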
import os
import sys
import string
import re
MAGIC = '\\input texinfo'
cmprog = re.compile('^@([a-z]+)([ \t]|$)') # Command (line-oriented)
blprog = re.compile('^[ \t]*$') # Blank line
kwprog = re.compile('@[a-z]+') # Keyword (embedded, usually
# with {} args)
spprog = re.compile('[\n@{}&<>]') # Special characters in
# running text
#
# menu item (Yuck!)
miprog = re.compile('^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
# 0 1 1 2 3 34 42 0
# ----- ---------- ---------
# -|-----------------------------
# -----------------------------------------------------
class HTMLNode:
"""Some of the parser's functionality is separated into this class.
A Node accumulates its contents, takes care of links to other Nodes
and saves itself when it is finished and all links are resolved.
"""
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">'
type = 0
cont = ''
epilogue = '</BODY></HTML>\n'
def __init__(self, dir, name, topname, title, next, prev, up):
self.dirname = dir
self.name = name
if topname:
self.topname = topname
else:
self.topname = name
self.title = title
self.next = next
self.prev = prev
self.up = up
self.lines = []
def write(self, *lines):
map(self.lines.append, lines)
def flush(self):
fp = open(self.dirname + '/' + makefile(self.name), 'w')
fp.write(self.prologue)
fp.write(self.text)
fp.write(self.epilogue)
fp.close()
def link(self, label, nodename, rel=None, rev=None):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
title = ''
else:
addr = makefile(nodename)
title = ' TITLE="%s"' % nodename
self.write(label, ': <A HREF="', addr, '"', \
rel and (' REL=' + rel) or "", \
rev and (' REV=' + rev) or "", \
title, '>', nodename, '</A> \n')
def finalize(self):
length = len(self.lines)
self.text = ''.join(self.lines)
self.lines = []
self.open_links()
self.output_links()
self.close_links()
links = ''.join(self.lines)
self.lines = []
self.prologue = (
self.DOCTYPE +
'\n<HTML><HEAD>\n'
' <!-- Converted with texi2html and Python -->\n'
' <TITLE>' + self.title + '</TITLE>\n'
' <LINK REL=Next HREF="'
+ makefile(self.next) + '" TITLE="' + self.next + '">\n'
' <LINK REL=Previous HREF="'
+ makefile(self.prev) + '" TITLE="' + self.prev + '">\n'
' <LINK REL=Up HREF="'
+ makefile(self.up) + '" TITLE="' + self.up + '">\n'
'</HEAD><BODY>\n' +
links)
if length > 20:
self.epilogue = '<P>\n%s</BODY></HTML>\n' % links
def open_links(self):
self.write('<HR>\n')
def close_links(self):
self.write('<HR>\n')
def output_links(self):
if self.cont != self.next:
self.link(' Cont', self.cont)
self.link(' Next', self.next, rel='Next')
self.link(' Prev', self.prev, rel='Previous')
self.link(' Up', self.up, rel='Up')
if self.name <> self.topname:
self.link(' Top', self.topname)
class HTML3Node(HTMLNode):
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML Level 3//EN//3.0">'
def open_links(self):
self.write('<DIV CLASS=Navigation>\n <HR>\n')
def close_links(self):
self.write(' <HR>\n</DIV>\n')
class TexinfoParser:
COPYRIGHT_SYMBOL = "©"
FN_ID_PATTERN = "(%(id)s)"
FN_SOURCE_PATTERN = '<A NAME=footnoteref%(id)s' \
' HREF="#footnotetext%(id)s">' \
+ FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<A NAME=footnotetext%(id)s' \
' HREF="#footnoteref%(id)s">' \
+ FN_ID_PATTERN + '</A>\n%(text)s<P>\n'
FN_HEADER = '\n<P>\n<HR NOSHADE SIZE=1 WIDTH=200>\n' \
'<STRONG><EM>Footnotes</EM></STRONG>\n<P>'
Node = HTMLNode
# Initialize an instance
def __init__(self):
self.unknown = {} # statistics about unknown @-commands
self.filenames = {} # Check for identical filenames
self.debugging = 0 # larger values produce more output
self.print_headers = 0 # always print headers?
self.nodefp = None # open file we're writing to
self.nodelineno = 0 # Linenumber relative to node
self.links = None # Links from current node
self.savetext = None # If not None, save text head instead
self.savestack = [] # If not None, save text head instead
self.htmlhelp = None # html help data
self.dirname = 'tmp' # directory where files are created
self.includedir = '.' # directory to search @include files
self.nodename = '' # name of current node
self.topname = '' # name of top node (first node seen)
self.title = '' # title of this whole Texinfo tree
self.resetindex() # Reset all indices
self.contents = [] # Reset table of contents
self.numbering = [] # Reset section numbering counters
self.nofill = 0 # Normal operation: fill paragraphs
self.values={'html': 1} # Names that should be parsed in ifset
self.stackinfo={} # Keep track of state in the stack
# XXX The following should be reset per node?!
self.footnotes = [] # Reset list of footnotes
self.itemarg = None # Reset command used by @item
self.itemnumber = None # Reset number for @item in @enumerate
self.itemindex = None # Reset item index name
self.node = None
self.nodestack = []
self.cont = 0
self.includedepth = 0
# Set htmlhelp helper class
def sethtmlhelp(self, htmlhelp):
self.htmlhelp = htmlhelp
# Set (output) directory name
def setdirname(self, dirname):
self.dirname = dirname
# Set include directory name
def setincludedir(self, includedir):
self.includedir = includedir
# Parse the contents of an entire file
def parse(self, fp):
line = fp.readline()
lineno = 1
while line and (line[0] == '%' or blprog.match(line)):
line = fp.readline()
lineno = lineno + 1
if line[:len(MAGIC)] <> MAGIC:
raise SyntaxError, 'file does not begin with %r' % (MAGIC,)
self.parserest(fp, lineno)
# Parse the contents of a file, not expecting a MAGIC header
def parserest(self, fp, initial_lineno):
lineno = initial_lineno
self.done = 0
self.skip = 0
self.stack = []
accu = []
while not self.done:
line = fp.readline()
self.nodelineno = self.nodelineno + 1
if not line:
if accu:
if not self.skip: self.process(accu)
accu = []
if initial_lineno > 0:
print '*** EOF before @bye'
break
lineno = lineno + 1
mo = cmprog.match(line)
if mo:
a, b = mo.span(1)
cmd = line[a:b]
if cmd in ('noindent', 'refill'):
accu.append(line)
else:
if accu:
if not self.skip:
self.process(accu)
accu = []
self.command(line, mo)
elif blprog.match(line) and \
'format' not in self.stack and \
'example' not in self.stack:
if accu:
if not self.skip:
self.process(accu)
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
accu = []
else:
# Append the line including trailing \n!
accu.append(line)
#
if self.skip:
print '*** Still skipping at the end'
if self.stack:
print '*** Stack not empty at the end'
print '***', self.stack
if self.includedepth == 0:
while self.nodestack:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
# Start saving text in a buffer instead of writing it to a file
def startsaving(self):
if self.savetext <> None:
self.savestack.append(self.savetext)
# print '*** Recursively saving text, expect trouble'
self.savetext = ''
# Return the text saved so far and start writing to file again
def collectsavings(self):
savetext = self.savetext
if len(self.savestack) > 0:
self.savetext = self.savestack[-1]
del self.savestack[-1]
else:
self.savetext = None
return savetext or ''
# Write text to file, or save it in a buffer, or ignore it
def write(self, *args):
try:
text = ''.join(args)
except:
print args
raise TypeError
if self.savetext <> None:
self.savetext = self.savetext + text
elif self.nodefp:
self.nodefp.write(text)
elif self.node:
self.node.write(text)
# Complete the current node -- write footnotes and close file
def endnode(self):
if self.savetext <> None:
print '*** Still saving text at end of node'
dummy = self.collectsavings()
if self.footnotes:
self.writefootnotes()
if self.nodefp:
if self.nodelineno > 20:
self.write('<HR>\n')
[name, next, prev, up] = self.nodelinks[:4]
self.link('Next', next)
self.link('Prev', prev)
self.link('Up', up)
if self.nodename <> self.topname:
self.link('Top', self.topname)
self.write('<HR>\n')
self.write('</BODY>\n')
self.nodefp.close()
self.nodefp = None
elif self.node:
if not self.cont and \
(not self.node.type or \
(self.node.next and self.node.prev and self.node.up)):
self.node.finalize()
self.node.flush()
else:
self.nodestack.append(self.node)
self.node = None
self.nodename = ''
# Process a list of lines, expanding embedded @-commands
# This mostly distinguishes between menus and normal text
def process(self, accu):
if self.debugging > 1:
print '!'*self.debugging, 'process:', self.skip, self.stack,
if accu: print accu[0][:30],
if accu[0][30:] or accu[1:]: print '...',
print
if self.inmenu():
# XXX should be done differently
for line in accu:
mo = miprog.match(line)
if not mo:
line = line.strip() + '\n'
self.expand(line)
continue
bgn, end = mo.span(0)
a, b = mo.span(1)
c, d = mo.span(2)
e, f = mo.span(3)
g, h = mo.span(4)
label = line[a:b]
nodename = line[c:d]
if nodename[0] == ':': nodename = label
else: nodename = line[e:f]
punct = line[g:h]
self.write(' <LI><A HREF="',
makefile(nodename),
'">', nodename,
'</A>', punct, '\n')
self.htmlhelp.menuitem(nodename)
self.expand(line[end:])
else:
text = ''.join(accu)
self.expand(text)
# find 'menu' (we might be inside 'ifset' or 'ifclear')
def inmenu(self):
#if 'menu' in self.stack:
# print 'inmenu :', self.skip, self.stack, self.stackinfo
stack = self.stack
while stack and stack[-1] in ('ifset','ifclear'):
try:
if self.stackinfo[len(stack)]:
return 0
except KeyError:
pass
stack = stack[:-1]
return (stack and stack[-1] == 'menu')
# Write a string, expanding embedded @-commands
def expand(self, text):
stack = []
i = 0
n = len(text)
while i < n:
start = i
mo = spprog.search(text, i)
if mo:
i = mo.start()
else:
self.write(text[start:])
break
self.write(text[start:i])
c = text[i]
i = i+1
if c == '\n':
self.write('\n')
continue
if c == '<':
self.write('<')
continue
if c == '>':
self.write('>')
continue
if c == '&':
self.write('&')
continue
if c == '{':
stack.append('')
continue
if c == '}':
if not stack:
print '*** Unmatched }'
self.write('}')
continue
cmd = stack[-1]
del stack[-1]
try:
method = getattr(self, 'close_' + cmd)
except AttributeError:
self.unknown_close(cmd)
continue
method()
continue
if c <> '@':
# Cannot happen unless spprog is changed
raise RuntimeError, 'unexpected funny %r' % c
start = i
while i < n and text[i] in string.ascii_letters: i = i+1
if i == start:
# @ plus non-letter: literal next character
i = i+1
c = text[start:i]
if c == ':':
# `@:' means no extra space after
# preceding `.', `?', `!' or `:'
pass
else:
# `@.' means a sentence-ending period;
# `@@', `@{', `@}' quote `@', `{', `}'
self.write(c)
continue
cmd = text[start:i]
if i < n and text[i] == '{':
i = i+1
stack.append(cmd)
try:
method = getattr(self, 'open_' + cmd)
except AttributeError:
self.unknown_open(cmd)
continue
method()
continue
try:
method = getattr(self, 'handle_' + cmd)
except AttributeError:
self.unknown_handle(cmd)
continue
method()
if stack:
print '*** Stack not empty at para:', stack
# --- Handle unknown embedded @-commands ---
def unknown_open(self, cmd):
print '*** No open func for @' + cmd + '{...}'
cmd = cmd + '{'
self.write('@', cmd)
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_close(self, cmd):
print '*** No close func for @' + cmd + '{...}'
cmd = '}' + cmd
self.write('}')
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_handle(self, cmd):
print '*** No handler for @' + cmd
self.write('@', cmd)
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# XXX The following sections should be ordered as the texinfo docs
# --- Embedded @-commands without {} argument list --
def handle_noindent(self): pass
def handle_refill(self): pass
# --- Include file handling ---
def do_include(self, args):
file = args
file = os.path.join(self.includedir, file)
try:
fp = open(file, 'r')
except IOError, msg:
print '*** Can\'t open include file', repr(file)
return
print '!'*self.debugging, '--> file', repr(file)
save_done = self.done
save_skip = self.skip
save_stack = self.stack
self.includedepth = self.includedepth + 1
self.parserest(fp, 0)
self.includedepth = self.includedepth - 1
fp.close()
self.done = save_done
self.skip = save_skip
self.stack = save_stack
print '!'*self.debugging, '<-- file', repr(file)
# --- Special Insertions ---
def open_dmn(self): pass
def close_dmn(self): pass
def open_dots(self): self.write('...')
def close_dots(self): pass
def open_bullet(self): pass
def close_bullet(self): pass
def open_TeX(self): self.write('TeX')
def close_TeX(self): pass
def handle_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def open_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def close_copyright(self): pass
def open_minus(self): self.write('-')
def close_minus(self): pass
# --- Accents ---
# rpyron 2002-05-07
# I would like to do at least as well as makeinfo when
# it is producing HTML output:
#
# input output
# @"o @"o umlaut accent
# @'o 'o acute accent
# @,{c} @,{c} cedilla accent
# @=o @=o macron/overbar accent
# @^o @^o circumflex accent
# @`o `o grave accent
# @~o @~o tilde accent
# @dotaccent{o} @dotaccent{o} overdot accent
# @H{o} @H{o} long Hungarian umlaut
# @ringaccent{o} @ringaccent{o} ring accent
# @tieaccent{oo} @tieaccent{oo} tie-after accent
# @u{o} @u{o} breve accent
# @ubaraccent{o} @ubaraccent{o} underbar accent
# @udotaccent{o} @udotaccent{o} underdot accent
# @v{o} @v{o} hacek or check accent
# @exclamdown{} ¡ upside-down !
# @questiondown{} ¿ upside-down ?
# @aa{},@AA{} å,Å a,A with circle
# @ae{},@AE{} æ,Æ ae,AE ligatures
# @dotless{i} @dotless{i} dotless i
# @dotless{j} @dotless{j} dotless j
# @l{},@L{} l/,L/ suppressed-L,l
# @o{},@O{} ø,Ø O,o with slash
# @oe{},@OE{} oe,OE oe,OE ligatures
# @ss{} ß es-zet or sharp S
#
# The following character codes and approximations have been
# copied from makeinfo's HTML output.
def open_exclamdown(self): self.write('¡') # upside-down !
def close_exclamdown(self): pass
def open_questiondown(self): self.write('¿') # upside-down ?
def close_questiondown(self): pass
def open_aa(self): self.write('å') # a with circle
def close_aa(self): pass
def open_AA(self): self.write('Å') # A with circle
def close_AA(self): pass
def open_ae(self): self.write('æ') # ae ligatures
def close_ae(self): pass
def open_AE(self): self.write('Æ') # AE ligatures
def close_AE(self): pass
def open_o(self): self.write('ø') # o with slash
def close_o(self): pass
def open_O(self): self.write('Ø') # O with slash
def close_O(self): pass
def open_ss(self): self.write('ß') # es-zet or sharp S
def close_ss(self): pass
def open_oe(self): self.write('oe') # oe ligatures
def close_oe(self): pass
def open_OE(self): self.write('OE') # OE ligatures
def close_OE(self): pass
def open_l(self): self.write('l/') # suppressed-l
def close_l(self): pass
def open_L(self): self.write('L/') # suppressed-L
def close_L(self): pass
# --- Special Glyphs for Examples ---
def open_result(self): self.write('=>')
def close_result(self): pass
def open_expansion(self): self.write('==>')
def close_expansion(self): pass
def open_print(self): self.write('-|')
def close_print(self): pass
def open_error(self): self.write('error-->')
def close_error(self): pass
def open_equiv(self): self.write('==')
def close_equiv(self): pass
def open_point(self): self.write('-!-')
def close_point(self): pass
# --- Cross References ---
def open_pxref(self):
self.write('see ')
self.startsaving()
def close_pxref(self):
self.makeref()
def open_xref(self):
self.write('See ')
self.startsaving()
def close_xref(self):
self.makeref()
def open_ref(self):
self.startsaving()
def close_ref(self):
self.makeref()
def open_inforef(self):
self.write('See info file ')
self.startsaving()
def close_inforef(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 3: args.append('')
node = args[0]
file = args[2]
self.write('`', file, '\', node `', node, '\'')
def makeref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
nodename = label = args[0]
if args[2]: label = args[2]
file = args[3]
title = args[4]
href = makefile(nodename)
if file:
href = '../' + file + '/' + href
self.write('<A HREF="', href, '">', label, '</A>')
# rpyron 2002-05-07 uref support
def open_uref(self):
self.startsaving()
def close_uref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 2: args.append('')
href = args[0]
label = args[1]
if not label: label = href
self.write('<A HREF="', href, '">', label, '</A>')
# rpyron 2002-05-07 image support
# GNU makeinfo producing HTML output tries `filename.png'; if
# that does not exist, it tries `filename.jpg'. If that does
# not exist either, it complains. GNU makeinfo does not handle
# GIF files; however, I include GIF support here because
# MySQL documentation uses GIF files.
def open_image(self):
self.startsaving()
def close_image(self):
self.makeimage()
def makeimage(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
filename = args[0]
width = args[1]
height = args[2]
alt = args[3]
ext = args[4]
# The HTML output will have a reference to the image
# that is relative to the HTML output directory,
# which is what 'filename' gives us. However, we need
# to find it relative to our own current directory,
# so we construct 'imagename'.
imagelocation = self.dirname + '/' + filename
if os.path.exists(imagelocation+'.png'):
filename += '.png'
elif os.path.exists(imagelocation+'.jpg'):
filename += '.jpg'
elif os.path.exists(imagelocation+'.gif'): # MySQL uses GIF files
filename += '.gif'
else:
print "*** Cannot find image " + imagelocation
#TODO: what is 'ext'?
self.write('<IMG SRC="', filename, '"', \
width and (' WIDTH="' + width + '"') or "", \
height and (' HEIGHT="' + height + '"') or "", \
alt and (' ALT="' + alt + '"') or "", \
'/>' )
self.htmlhelp.addimage(imagelocation)
# --- Marking Words and Phrases ---
# --- Other @xxx{...} commands ---
def open_(self): pass # Used by {text enclosed in braces}
def close_(self): pass
open_asis = open_
close_asis = close_
def open_cite(self): self.write('<CITE>')
def close_cite(self): self.write('</CITE>')
def open_code(self): self.write('<CODE>')
def close_code(self): self.write('</CODE>')
def open_t(self): self.write('<TT>')
def close_t(self): self.write('</TT>')
def open_dfn(self): self.write('<DFN>')
def close_dfn(self): self.write('</DFN>')
def open_emph(self): self.write('<EM>')
def close_emph(self): self.write('</EM>')
def open_i(self): self.write('<I>')
def close_i(self): self.write('</I>')
def open_footnote(self):
# if self.savetext <> None:
# print '*** Recursive footnote -- expect weirdness'
id = len(self.footnotes) + 1
self.write(self.FN_SOURCE_PATTERN % {'id': repr(id)})
self.startsaving()
def close_footnote(self):
id = len(self.footnotes) + 1
self.footnotes.append((id, self.collectsavings()))
def writefootnotes(self):
self.write(self.FN_HEADER)
for id, text in self.footnotes:
self.write(self.FN_TARGET_PATTERN
% {'id': repr(id), 'text': text})
self.footnotes = []
def open_file(self): self.write('<CODE>')
def close_file(self): self.write('</CODE>')
def open_kbd(self): self.write('<KBD>')
def close_kbd(self): self.write('</KBD>')
def open_key(self): self.write('<KEY>')
def close_key(self): self.write('</KEY>')
def open_r(self): self.write('<R>')
def close_r(self): self.write('</R>')
def open_samp(self): self.write('`<SAMP>')
def close_samp(self): self.write('</SAMP>\'')
def open_sc(self): self.write('<SMALLCAPS>')
def close_sc(self): self.write('</SMALLCAPS>')
def open_strong(self): self.write('<STRONG>')
def close_strong(self): self.write('</STRONG>')
def open_b(self): self.write('<B>')
def close_b(self): self.write('</B>')
def open_var(self): self.write('<VAR>')
def close_var(self): self.write('</VAR>')
def open_w(self): self.write('<NOBREAK>')
def close_w(self): self.write('</NOBREAK>')
def open_url(self): self.startsaving()
def close_url(self):
text = self.collectsavings()
self.write('<A HREF="', text, '">', text, '</A>')
def open_email(self): self.startsaving()
def close_email(self):
text = self.collectsavings()
self.write('<A HREF="mailto:', text, '">', text, '</A>')
open_titlefont = open_
close_titlefont = close_
def open_small(self): pass
def close_small(self): pass
def command(self, line, mo):
a, b = mo.span(1)
cmd = line[a:b]
args = line[b:].strip()
if self.debugging > 1:
print '!'*self.debugging, 'command:', self.skip, self.stack, \
'@' + cmd, args
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
try:
func = getattr(self, 'bgn_' + cmd)
except AttributeError:
# don't complain if we are skipping anyway
if not self.skip:
self.unknown_cmd(cmd, args)
return
self.stack.append(cmd)
func(args)
return
if not self.skip or cmd == 'end':
func(args)
def unknown_cmd(self, cmd, args):
print '*** unknown', '@' + cmd, args
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def do_end(self, args):
words = args.split()
if not words:
print '*** @end w/o args'
else:
cmd = words[0]
if not self.stack or self.stack[-1] <> cmd:
print '*** @end', cmd, 'unexpected'
else:
del self.stack[-1]
try:
func = getattr(self, 'end_' + cmd)
except AttributeError:
self.unknown_end(cmd)
return
func()
def unknown_end(self, cmd):
cmd = 'end ' + cmd
print '*** unknown', '@' + cmd
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# --- Comments ---
def do_comment(self, args): pass
do_c = do_comment
# --- Conditional processing ---
def bgn_ifinfo(self, args): pass
def end_ifinfo(self): pass
def bgn_iftex(self, args): self.skip = self.skip + 1
def end_iftex(self): self.skip = self.skip - 1
def bgn_ignore(self, args): self.skip = self.skip + 1
def end_ignore(self): self.skip = self.skip - 1
def bgn_tex(self, args): self.skip = self.skip + 1
def end_tex(self): self.skip = self.skip - 1
def do_set(self, args):
fields = args.split(' ')
key = fields[0]
if len(fields) == 1:
value = 1
else:
value = ' '.join(fields[1:])
self.values[key] = value
def do_clear(self, args):
self.values[args] = None
def bgn_ifset(self, args):
if args not in self.values.keys() \
or self.values[args] is None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifset(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print '*** end_ifset: KeyError :', len(self.stack) + 1
def bgn_ifclear(self, args):
if args in self.values.keys() \
and self.values[args] is not None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifclear(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print '*** end_ifclear: KeyError :', len(self.stack) + 1
def open_value(self):
self.startsaving()
def close_value(self):
key = self.collectsavings()
if key in self.values.keys():
self.write(self.values[key])
else:
print '*** Undefined value: ', key
# --- Beginning a file ---
do_finalout = do_comment
do_setchapternewpage = do_comment
do_setfilename = do_comment
def do_settitle(self, args):
self.startsaving()
self.expand(args)
self.title = self.collectsavings()
def do_parskip(self, args): pass
# --- Ending a file ---
def do_bye(self, args):
self.endnode()
self.done = 1
# --- Title page ---
def bgn_titlepage(self, args): self.skip = self.skip + 1
def end_titlepage(self): self.skip = self.skip - 1
def do_shorttitlepage(self, args): pass
def do_center(self, args):
# Actually not used outside title page...
self.write('<H1>')
self.expand(args)
self.write('</H1>\n')
do_title = do_center
do_subtitle = do_center
do_author = do_center
do_vskip = do_comment
do_vfill = do_comment
do_smallbook = do_comment
do_paragraphindent = do_comment
do_setchapternewpage = do_comment
do_headings = do_comment
do_footnotestyle = do_comment
do_evenheading = do_comment
do_evenfooting = do_comment
do_oddheading = do_comment
do_oddfooting = do_comment
do_everyheading = do_comment
do_everyfooting = do_comment
# --- Nodes ---
def do_node(self, args):
self.endnode()
self.nodelineno = 0
parts = [s.strip() for s in args.split(',')]
while len(parts) < 4: parts.append('')
self.nodelinks = parts
[name, next, prev, up] = parts[:4]
file = self.dirname + '/' + makefile(name)
if self.filenames.has_key(file):
print '*** Filename already in use: ', file
else:
if self.debugging: print '!'*self.debugging, '--- writing', file
self.filenames[file] = 1
# self.nodefp = open(file, 'w')
self.nodename = name
if self.cont and self.nodestack:
self.nodestack[-1].cont = self.nodename
if not self.topname: self.topname = name
title = name
if self.title: title = title + ' -- ' + self.title
self.node = self.Node(self.dirname, self.nodename, self.topname,
title, next, prev, up)
self.htmlhelp.addnode(self.nodename,next,prev,up,file)
def link(self, label, nodename):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
else:
addr = makefile(nodename)
self.write(label, ': <A HREF="', addr, '" TYPE="',
label, '">', nodename, '</A> \n')
# --- Sectioning commands ---
def popstack(self, type):
if (self.node):
self.node.type = type
while self.nodestack:
if self.nodestack[-1].type > type:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
elif self.nodestack[-1].type == type:
if not self.nodestack[-1].next:
self.nodestack[-1].next = self.node.name
if not self.node.prev:
self.node.prev = self.nodestack[-1].name
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
else:
if type > 1 and not self.node.up:
self.node.up = self.nodestack[-1].name
break
def do_chapter(self, args):
self.heading('H1', args, 0)
self.popstack(1)
def do_unnumbered(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_appendix(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_top(self, args):
self.heading('H1', args, -1)
def do_chapheading(self, args):
self.heading('H1', args, -1)
def do_majorheading(self, args):
self.heading('H1', args, -1)
def do_section(self, args):
self.heading('H1', args, 1)
self.popstack(2)
def do_unnumberedsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
def do_appendixsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
do_appendixsection = do_appendixsec
def do_heading(self, args):
self.heading('H1', args, -1)
def do_subsection(self, args):
self.heading('H2', args, 2)
self.popstack(3)
def do_unnumberedsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_appendixsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_subheading(self, args):
self.heading('H2', args, -1)
def do_subsubsection(self, args):
self.heading('H3', args, 3)
self.popstack(4)
def do_unnumberedsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_appendixsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_subsubheading(self, args):
self.heading('H3', args, -1)
def heading(self, type, args, level):
if level >= 0:
while len(self.numbering) <= level:
self.numbering.append(0)
del self.numbering[level+1:]
self.numbering[level] = self.numbering[level] + 1
x = ''
for i in self.numbering:
x = x + repr(i) + '.'
args = x + ' ' + args
self.contents.append((level, args, self.nodename))
self.write('<', type, '>')
self.expand(args)
self.write('</', type, '>\n')
if self.debugging or self.print_headers:
print '---', args
def do_contents(self, args):
# pass
self.listcontents('Table of Contents', 999)
def do_shortcontents(self, args):
pass
# self.listcontents('Short Contents', 0)
do_summarycontents = do_shortcontents
def listcontents(self, title, maxlevel):
self.write('<H1>', title, '</H1>\n<UL COMPACT PLAIN>\n')
prevlevels = [0]
for level, title, node in self.contents:
if level > maxlevel:
continue
if level > prevlevels[-1]:
# can only advance one level at a time
self.write(' '*prevlevels[-1], '<UL PLAIN>\n')
prevlevels.append(level)
elif level < prevlevels[-1]:
# might drop back multiple levels
while level < prevlevels[-1]:
del prevlevels[-1]
self.write(' '*prevlevels[-1],
'</UL>\n')
self.write(' '*level, '<LI> <A HREF="',
makefile(node), '">')
self.expand(title)
self.write('</A>\n')
self.write('</UL>\n' * len(prevlevels))
# --- Page lay-out ---
# These commands are only meaningful in printed text
def do_page(self, args): pass
def do_need(self, args): pass
def bgn_group(self, args): pass
def end_group(self): pass
# --- Line lay-out ---
def do_sp(self, args):
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
def do_hline(self, args):
self.write('<HR>')
# --- Function and variable definitions ---
def bgn_deffn(self, args):
self.write('<DL>')
self.do_deffnx(args)
def end_deffn(self):
self.write('</DL>\n')
def do_deffnx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_defun(self, args): self.bgn_deffn('Function ' + args)
end_defun = end_deffn
def do_defunx(self, args): self.do_deffnx('Function ' + args)
def bgn_defmac(self, args): self.bgn_deffn('Macro ' + args)
end_defmac = end_deffn
def do_defmacx(self, args): self.do_deffnx('Macro ' + args)
def bgn_defspec(self, args): self.bgn_deffn('{Special Form} ' + args)
end_defspec = end_deffn
def do_defspecx(self, args): self.do_deffnx('{Special Form} ' + args)
def bgn_defvr(self, args):
self.write('<DL>')
self.do_defvrx(args)
end_defvr = end_deffn
def do_defvrx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@code{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('vr', name)
def bgn_defvar(self, args): self.bgn_defvr('Variable ' + args)
end_defvar = end_defvr
def do_defvarx(self, args): self.do_defvrx('Variable ' + args)
def bgn_defopt(self, args): self.bgn_defvr('{User Option} ' + args)
end_defopt = end_defvr
def do_defoptx(self, args): self.do_defvrx('{User Option} ' + args)
# --- Ditto for typed languages ---
def bgn_deftypefn(self, args):
self.write('<DL>')
self.do_deftypefnx(args)
end_deftypefn = end_deffn
def do_deftypefnx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypefun(self, args): self.bgn_deftypefn('Function ' + args)
end_deftypefun = end_deftypefn
def do_deftypefunx(self, args): self.do_deftypefnx('Function ' + args)
def bgn_deftypevr(self, args):
self.write('<DL>')
self.do_deftypevrx(args)
end_deftypevr = end_deftypefn
def do_deftypevrx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypevar(self, args):
self.bgn_deftypevr('Variable ' + args)
end_deftypevar = end_deftypevr
def do_deftypevarx(self, args):
self.do_deftypevrx('Variable ' + args)
# --- Ditto for object-oriented languages ---
def bgn_defcv(self, args):
self.write('<DL>')
self.do_defcvx(args)
end_defcv = end_deftypevr
def do_defcvx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('vr', '%s @r{on %s}' % (name, classname))
def bgn_defivar(self, args):
self.bgn_defcv('{Instance Variable} ' + args)
end_defivar = end_defcv
def do_defivarx(self, args):
self.do_defcvx('{Instance Variable} ' + args)
def bgn_defop(self, args):
self.write('<DL>')
self.do_defopx(args)
end_defop = end_defcv
def do_defopx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('fn', '%s @r{on %s}' % (name, classname))
def bgn_defmethod(self, args):
self.bgn_defop('Method ' + args)
end_defmethod = end_defop
def do_defmethodx(self, args):
self.do_defopx('Method ' + args)
# --- Ditto for data types ---
def bgn_deftp(self, args):
self.write('<DL>')
self.do_deftpx(args)
end_deftp = end_defcv
def do_deftpx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('tp', name)
# --- Making Lists and Tables
def bgn_enumerate(self, args):
if not args:
self.write('<OL>\n')
self.stackinfo[len(self.stack)] = '</OL>\n'
else:
self.itemnumber = args
self.write('<UL>\n')
self.stackinfo[len(self.stack)] = '</UL>\n'
def end_enumerate(self):
self.itemnumber = None
self.write(self.stackinfo[len(self.stack) + 1])
del self.stackinfo[len(self.stack) + 1]
def bgn_itemize(self, args):
self.itemarg = args
self.write('<UL>\n')
def end_itemize(self):
self.itemarg = None
self.write('</UL>\n')
def bgn_table(self, args):
self.itemarg = args
self.write('<DL>\n')
def end_table(self):
self.itemarg = None
self.write('</DL>\n')
def bgn_ftable(self, args):
self.itemindex = 'fn'
self.bgn_table(args)
def end_ftable(self):
self.itemindex = None
self.end_table()
def bgn_vtable(self, args):
self.itemindex = 'vr'
self.bgn_table(args)
def end_vtable(self):
self.itemindex = None
self.end_table()
def do_item(self, args):
if self.itemindex: self.index(self.itemindex, args)
if self.itemarg:
if self.itemarg[0] == '@' and self.itemarg[1] and \
self.itemarg[1] in string.ascii_letters:
args = self.itemarg + '{' + args + '}'
else:
# some other character, e.g. '-'
args = self.itemarg + ' ' + args
if self.itemnumber <> None:
args = self.itemnumber + '. ' + args
self.itemnumber = increment(self.itemnumber)
if self.stack and self.stack[-1] == 'table':
self.write('<DT>')
self.expand(args)
self.write('\n<DD>')
elif self.stack and self.stack[-1] == 'multitable':
self.write('<TR><TD>')
self.expand(args)
self.write('</TD>\n</TR>\n')
else:
self.write('<LI>')
self.expand(args)
self.write(' ')
do_itemx = do_item # XXX Should suppress leading blank line
# rpyron 2002-05-07 multitable support
def bgn_multitable(self, args):
self.itemarg = None # should be handled by columnfractions
self.write('<TABLE BORDER="">\n')
def end_multitable(self):
self.itemarg = None
self.write('</TABLE>\n<BR>\n')
def handle_columnfractions(self):
# It would be better to handle this, but for now it's in the way...
self.itemarg = None
def handle_tab(self):
self.write('</TD>\n <TD>')
# --- Enumerations, displays, quotations ---
# XXX Most of these should increase the indentation somehow
def bgn_quotation(self, args): self.write('<BLOCKQUOTE>')
def end_quotation(self): self.write('</BLOCKQUOTE>\n')
def bgn_example(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>')
def end_example(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
bgn_lisp = bgn_example # Synonym when contents are executable lisp code
end_lisp = end_example
bgn_smallexample = bgn_example # XXX Should use smaller font
end_smallexample = end_example
bgn_smalllisp = bgn_lisp # Ditto
end_smalllisp = end_lisp
bgn_display = bgn_example
end_display = end_example
bgn_format = bgn_display
end_format = end_display
def do_exdent(self, args): self.expand(args + '\n')
# XXX Should really mess with indentation
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>\n')
def end_flushleft(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<DIR>\n')
self.write(' <STRONG><EM>Menu</EM></STRONG><P>\n')
self.htmlhelp.beginmenu()
def end_menu(self):
self.write('</DIR>\n')
self.htmlhelp.endmenu()
def bgn_cartouche(self, args): pass
def end_cartouche(self): pass
# --- Indices ---
def resetindex(self):
self.noncodeindices = ['cp']
self.indextitle = {}
self.indextitle['cp'] = 'Concept'
self.indextitle['fn'] = 'Function'
self.indextitle['ky'] = 'Keyword'
self.indextitle['pg'] = 'Program'
self.indextitle['tp'] = 'Type'
self.indextitle['vr'] = 'Variable'
#
self.whichindex = {}
for name in self.indextitle.keys():
self.whichindex[name] = []
def user_index(self, name, args):
if self.whichindex.has_key(name):
self.index(name, args)
else:
print '*** No index named', repr(name)
def do_cindex(self, args): self.index('cp', args)
def do_findex(self, args): self.index('fn', args)
def do_kindex(self, args): self.index('ky', args)
def do_pindex(self, args): self.index('pg', args)
def do_tindex(self, args): self.index('tp', args)
def do_vindex(self, args): self.index('vr', args)
def index(self, name, args):
self.whichindex[name].append((args, self.nodename))
self.htmlhelp.index(args, self.nodename)
def do_synindex(self, args):
words = args.split()
if len(words) <> 2:
print '*** bad @synindex', args
return
[old, new] = words
if not self.whichindex.has_key(old) or \
not self.whichindex.has_key(new):
print '*** bad key(s) in @synindex', args
return
if old <> new and \
self.whichindex[old] is not self.whichindex[new]:
inew = self.whichindex[new]
inew[len(inew):] = self.whichindex[old]
self.whichindex[old] = inew
do_syncodeindex = do_synindex # XXX Should use code font
def do_printindex(self, args):
words = args.split()
for name in words:
if self.whichindex.has_key(name):
self.prindex(name)
else:
print '*** No index named', repr(name)
def prindex(self, name):
iscodeindex = (name not in self.noncodeindices)
index = self.whichindex[name]
if not index: return
if self.debugging:
print '!'*self.debugging, '--- Generating', \
self.indextitle[name], 'index'
# The node already provides a title
index1 = []
junkprog = re.compile('^(@[a-z]+)?{')
for key, node in index:
sortkey = key.lower()
# Remove leading `@cmd{' from sort key
# -- don't bother about the matching `}'
oldsortkey = sortkey
while 1:
mo = junkprog.match(sortkey)
if not mo:
break
i = mo.end()
sortkey = sortkey[i:]
index1.append((sortkey, key, node))
del index[:]
index1.sort()
self.write('<DL COMPACT>\n')
prevkey = prevnode = None
for sortkey, key, node in index1:
if (key, node) == (prevkey, prevnode):
continue
if self.debugging > 1: print '!'*self.debugging, key, ':', node
self.write('<DT>')
if iscodeindex: key = '@code{' + key + '}'
if key != prevkey:
self.expand(key)
self.write('\n<DD><A HREF="%s">%s</A>\n' % (makefile(node), node))
prevkey, prevnode = key, node
self.write('</DL>\n')
# --- Final error reports ---
def report(self):
if self.unknown:
print '--- Unrecognized commands ---'
cmds = self.unknown.keys()
cmds.sort()
for cmd in cmds:
print cmd.ljust(20), self.unknown[cmd]
class TexinfoParserHTML3(TexinfoParser):
COPYRIGHT_SYMBOL = "©"
FN_ID_PATTERN = "[%(id)s]"
FN_SOURCE_PATTERN = '<A ID=footnoteref%(id)s ' \
'HREF="#footnotetext%(id)s">' + FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<FN ID=footnotetext%(id)s>\n' \
'<P><A HREF="#footnoteref%(id)s">' + FN_ID_PATTERN \
+ '</A>\n%(text)s</P></FN>\n'
FN_HEADER = '<DIV CLASS=footnotes>\n <HR NOSHADE WIDTH=200>\n' \
' <STRONG><EM>Footnotes</EM></STRONG>\n <P>\n'
Node = HTML3Node
def bgn_quotation(self, args): self.write('<BQ>')
def end_quotation(self): self.write('</BQ>\n')
def bgn_example(self, args):
# this use of <CODE> would not be legal in HTML 2.0,
# but is in more recent DTDs.
self.nofill = self.nofill + 1
self.write('<PRE CLASS=example><CODE>')
def end_example(self):
self.write("</CODE></PRE>\n")
self.nofill = self.nofill - 1
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE CLASS=flushleft>\n')
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<DIV ALIGN=right CLASS=flushright><ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS></DIV>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<UL PLAIN CLASS=menu>\n')
self.write(' <LH>Menu</LH>\n')
def end_menu(self):
self.write('</UL>\n')
# rpyron 2002-05-07
class HTMLHelp:
"""
This class encapsulates support for HTML Help. Node names,
file names, menu items, index items, and image file names are
accumulated until a call to finalize(). At that time, three
output files are created in the current directory:
`helpbase`.hhp is a HTML Help Workshop project file.
It contains various information, some of
which I do not understand; I just copied
the default project info from a fresh
installation.
`helpbase`.hhc is the Contents file for the project.
`helpbase`.hhk is the Index file for the project.
When these files are used as input to HTML Help Workshop,
the resulting file will be named:
`helpbase`.chm
If none of the defaults in `helpbase`.hhp are changed,
the .CHM file will have Contents, Index, Search, and
Favorites tabs.
"""
codeprog = re.compile('@code{(.*?)}')
def __init__(self,helpbase,dirname):
self.helpbase = helpbase
self.dirname = dirname
self.projectfile = None
self.contentfile = None
self.indexfile = None
self.nodelist = []
self.nodenames = {} # nodename : index
self.nodeindex = {}
self.filenames = {} # filename : filename
self.indexlist = [] # (args,nodename) == (key,location)
self.current = ''
self.menudict = {}
self.dumped = {}
def addnode(self,name,next,prev,up,filename):
node = (name,next,prev,up,filename)
# add this file to dict
# retrieve list with self.filenames.values()
self.filenames[filename] = filename
# add this node to nodelist
self.nodeindex[name] = len(self.nodelist)
self.nodelist.append(node)
# set 'current' for menu items
self.current = name
self.menudict[self.current] = []
def menuitem(self,nodename):
menu = self.menudict[self.current]
menu.append(nodename)
def addimage(self,imagename):
self.filenames[imagename] = imagename
def index(self, args, nodename):
self.indexlist.append((args,nodename))
def beginmenu(self):
pass
def endmenu(self):
pass
def finalize(self):
if not self.helpbase:
return
# generate interesting filenames
resultfile = self.helpbase + '.chm'
projectfile = self.helpbase + '.hhp'
contentfile = self.helpbase + '.hhc'
indexfile = self.helpbase + '.hhk'
# generate a reasonable title
title = self.helpbase
# get the default topic file
(topname,topnext,topprev,topup,topfile) = self.nodelist[0]
defaulttopic = topfile
# PROJECT FILE
try:
fp = open(projectfile,'w')
print>>fp, '[OPTIONS]'
print>>fp, 'Auto Index=Yes'
print>>fp, 'Binary TOC=No'
print>>fp, 'Binary Index=Yes'
print>>fp, 'Compatibility=1.1'
print>>fp, 'Compiled file=' + resultfile + ''
print>>fp, 'Contents file=' + contentfile + ''
print>>fp, 'Default topic=' + defaulttopic + ''
print>>fp, 'Error log file=ErrorLog.log'
print>>fp, 'Index file=' + indexfile + ''
print>>fp, 'Title=' + title + ''
print>>fp, 'Display compile progress=Yes'
print>>fp, 'Full-text search=Yes'
print>>fp, 'Default window=main'
print>>fp, ''
print>>fp, '[WINDOWS]'
print>>fp, ('main=,"' + contentfile + '","' + indexfile
+ '","","",,,,,0x23520,222,0x1046,[10,10,780,560],'
'0xB0000,,,,,,0')
print>>fp, ''
print>>fp, '[FILES]'
print>>fp, ''
self.dumpfiles(fp)
fp.close()
except IOError, msg:
print projectfile, ':', msg
sys.exit(1)
# CONTENT FILE
try:
fp = open(contentfile,'w')
print>>fp, '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">'
print>>fp, '<!-- This file defines the table of contents -->'
print>>fp, '<HTML>'
print>>fp, '<HEAD>'
print>>fp, ('<meta name="GENERATOR"'
'content="Microsoft® HTML Help Workshop 4.1">')
print>>fp, '<!-- Sitemap 1.0 -->'
print>>fp, '</HEAD>'
print>>fp, '<BODY>'
print>>fp, ' <OBJECT type="text/site properties">'
print>>fp, ' <param name="Window Styles" value="0x800025">'
print>>fp, ' <param name="comment" value="title:">'
print>>fp, ' <param name="comment" value="base:">'
print>>fp, ' </OBJECT>'
self.dumpnodes(fp)
print>>fp, '</BODY>'
print>>fp, '</HTML>'
fp.close()
except IOError, msg:
print contentfile, ':', msg
sys.exit(1)
# INDEX FILE
try:
fp = open(indexfile ,'w')
print>>fp, '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">'
print>>fp, '<!-- This file defines the index -->'
print>>fp, '<HTML>'
print>>fp, '<HEAD>'
print>>fp, ('<meta name="GENERATOR"'
'content="Microsoft® HTML Help Workshop 4.1">')
print>>fp, '<!-- Sitemap 1.0 -->'
print>>fp, '</HEAD>'
print>>fp, '<BODY>'
print>>fp, '<OBJECT type="text/site properties">'
print>>fp, '</OBJECT>'
self.dumpindex(fp)
print>>fp, '</BODY>'
print>>fp, '</HTML>'
fp.close()
except IOError, msg:
print indexfile , ':', msg
sys.exit(1)
def dumpfiles(self, outfile=sys.stdout):
filelist = self.filenames.values()
filelist.sort()
for filename in filelist:
print>>outfile, filename
def dumpnodes(self, outfile=sys.stdout):
self.dumped = {}
if self.nodelist:
nodename, dummy, dummy, dummy, dummy = self.nodelist[0]
self.topnode = nodename
print>>outfile, '<UL>'
for node in self.nodelist:
self.dumpnode(node,0,outfile)
print>>outfile, '</UL>'
def dumpnode(self, node, indent=0, outfile=sys.stdout):
if node:
# Retrieve info for this node
(nodename,next,prev,up,filename) = node
self.current = nodename
# Have we been dumped already?
if self.dumped.has_key(nodename):
return
self.dumped[nodename] = 1
# Print info for this node
print>>outfile, ' '*indent,
print>>outfile, '<LI><OBJECT type="text/sitemap">',
print>>outfile, '<param name="Name" value="' + nodename +'">',
print>>outfile, '<param name="Local" value="'+ filename +'">',
print>>outfile, '</OBJECT>'
# Does this node have menu items?
try:
menu = self.menudict[nodename]
self.dumpmenu(menu,indent+2,outfile)
except KeyError:
pass
def dumpmenu(self, menu, indent=0, outfile=sys.stdout):
if menu:
currentnode = self.current
if currentnode != self.topnode: # XXX this is a hack
print>>outfile, ' '*indent + '<UL>'
indent += 2
for item in menu:
menunode = self.getnode(item)
self.dumpnode(menunode,indent,outfile)
if currentnode != self.topnode: # XXX this is a hack
print>>outfile, ' '*indent + '</UL>'
indent -= 2
def getnode(self, nodename):
try:
index = self.nodeindex[nodename]
return self.nodelist[index]
except KeyError:
return None
except IndexError:
return None
# (args,nodename) == (key,location)
def dumpindex(self, outfile=sys.stdout):
print>>outfile, '<UL>'
for (key,location) in self.indexlist:
key = self.codeexpand(key)
location = makefile(location)
location = self.dirname + '/' + location
print>>outfile, '<LI><OBJECT type="text/sitemap">',
print>>outfile, '<param name="Name" value="' + key + '">',
print>>outfile, '<param name="Local" value="' + location + '">',
print>>outfile, '</OBJECT>'
print>>outfile, '</UL>'
def codeexpand(self, line):
co = self.codeprog.match(line)
if not co:
return line
bgn, end = co.span(0)
a, b = co.span(1)
line = line[:bgn] + line[a:b] + line[end:]
return line
# Put @var{} around alphabetic substrings
def makevar(str):
return '@var{'+str+'}'
# Split a string in "words" according to findwordend
def splitwords(str, minlength):
words = []
i = 0
n = len(str)
while i < n:
while i < n and str[i] in ' \t\n': i = i+1
if i >= n: break
start = i
i = findwordend(str, i, n)
words.append(str[start:i])
while len(words) < minlength: words.append('')
return words
# Find the end of a "word", matching braces and interpreting @@ @{ @}
fwprog = re.compile('[@{} ]')
def findwordend(str, i, n):
level = 0
while i < n:
mo = fwprog.search(str, i)
if not mo:
break
i = mo.start()
c = str[i]; i = i+1
if c == '@': i = i+1 # Next character is not special
elif c == '{': level = level+1
elif c == '}': level = level-1
elif c == ' ' and level <= 0: return i-1
return n
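# For example (illustrative, traced from the two helpers above):
#   splitwords('Function foo {a b} c', 2)  ->  ['Function', 'foo', '{a b}', 'c']
# and the same call with minlength=6 would pad the result with '' entries.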
# Convert a node name into a file name
def makefile(nodename):
nodename = nodename.strip()
return fixfunnychars(nodename) + '.html'
# Characters that are perfectly safe in filenames and hyperlinks
goodchars = string.ascii_letters + string.digits + '!@-=+.'
# Replace characters that aren't perfectly safe by dashes
# Underscores are bad since Cern HTTPD treats them as delimiters for
# encoding times, so you get mismatches if you compress your files:
# a.html.gz will map to a_b.html.gz
def fixfunnychars(addr):
i = 0
while i < len(addr):
c = addr[i]
if c not in goodchars:
c = '-'
addr = addr[:i] + c + addr[i+1:]
i = i + len(c)
return addr
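# For example (hypothetical node name): makefile('Invoking texi2html')
# returns 'Invoking-texi2html.html', since the space is not in goodchars
# and is replaced by a dash.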
# Increment a string used as an enumeration
def increment(s):
if not s:
return '1'
for sequence in string.digits, string.lowercase, string.uppercase:
lastc = s[-1]
if lastc in sequence:
i = sequence.index(lastc) + 1
if i >= len(sequence):
if len(s) == 1:
s = sequence[0]*2
if s == '00':
s = '10'
else:
s = increment(s[:-1]) + sequence[0]
else:
s = s[:-1] + sequence[i]
return s
return s # Don't increment
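# A few illustrative values, traced from the code above:
#   increment('1') -> '2'      increment('9') -> '10'
#   increment('a') -> 'b'      increment('z') -> 'aa'
#   increment('Az') -> 'Ba'    increment('') -> '1'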
def test():
import sys
debugging = 0
print_headers = 0
cont = 0
html3 = 0
    helpbase = ''          # set via -H below; empty disables HTML Help output
    htmlhelp = ''
    while sys.argv[1] == '-d':
debugging = debugging + 1
del sys.argv[1]
if sys.argv[1] == '-p':
print_headers = 1
del sys.argv[1]
if sys.argv[1] == '-c':
cont = 1
del sys.argv[1]
if sys.argv[1] == '-3':
html3 = 1
del sys.argv[1]
if sys.argv[1] == '-H':
helpbase = sys.argv[2]
del sys.argv[1:3]
if len(sys.argv) <> 3:
print 'usage: texi2hh [-d [-d]] [-p] [-c] [-3] [-H htmlhelp]', \
'inputfile outputdirectory'
sys.exit(2)
if html3:
parser = TexinfoParserHTML3()
else:
parser = TexinfoParser()
parser.cont = cont
parser.debugging = debugging
parser.print_headers = print_headers
file = sys.argv[1]
dirname = sys.argv[2]
parser.setdirname(dirname)
parser.setincludedir(os.path.dirname(file))
htmlhelp = HTMLHelp(helpbase, dirname)
parser.sethtmlhelp(htmlhelp)
try:
fp = open(file, 'r')
except IOError, msg:
print file, ':', msg
sys.exit(1)
parser.parse(fp)
fp.close()
parser.report()
htmlhelp.finalize()
if __name__ == "__main__":
test()
| gpl-2.0 | 8,282,592,934,078,301,000 | 32.60154 | 78 | 0.517859 | false |
russelhampton05/MenMew | webApp/Team3/MenMew/migrations/0001_initial.py | 1 | 8120 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-25 03:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('line1', models.CharField(max_length=45)),
('line2', models.CharField(max_length=45)),
('city', models.CharField(max_length=45)),
('state', models.CharField(max_length=2)),
('zip', models.PositiveIntegerField(max_length=5)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=45)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='IngredientCategory',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=45)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=45)),
('desc', models.CharField(max_length=255)),
('item_photo', models.CharField(max_length=255)),
('item_price', models.DecimalField(decimal_places=2, max_digits=10)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='ItemCategory',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=45)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='ItemCustomization',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('add', models.BinaryField()),
('ingredient_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_customization', to='MenMew.Ingredient')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='ItemIngredient',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='ItemOrdered',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('item_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MenMew.Item')),
],
),
migrations.CreateModel(
name='Restaurant',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=45)),
('desc', models.CharField(max_length=255)),
('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='restaurant', to='MenMew.Address')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Server',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('employee_id', models.CharField(max_length=45)),
('restaurant_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MenMew.Restaurant')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Table',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('code', models.CharField(max_length=45)),
('restaurant_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tables', to='MenMew.Restaurant')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('restaurant_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='MenMew.Restaurant')),
('server_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='MenMew.Server')),
('table_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='MenMew.Table')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='User',
fields=[
('id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('email', models.CharField(max_length=45)),
('password', models.CharField(max_length=128)),
('salt', models.CharField(max_length=16)),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
],
options={
'ordering': ('id',),
},
),
migrations.AddField(
model_name='ticket',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='MenMew.User'),
),
migrations.AddField(
model_name='itemordered',
name='ticket_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MenMew.User'),
),
migrations.AddField(
model_name='itemordered',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items_ordered', to='MenMew.Ticket'),
),
migrations.AddField(
model_name='itemcustomization',
name='item_ordered_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_customization', to='MenMew.ItemOrdered'),
),
migrations.AddField(
model_name='item',
name='item_category_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MenMew.ItemCategory'),
),
migrations.AddField(
model_name='item',
name='restaurant_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='MenMew.Restaurant'),
),
migrations.AddField(
model_name='ingredient',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ingredients', to='MenMew.IngredientCategory'),
),
]
| mit | 2,713,011,691,249,339,400 | 40.010101 | 157 | 0.524877 | false |
cdeepakroy/TubeTK | Examples/SegmentVesselsUsingNeuralNetworks/scripts/PreProcessing.py | 1 | 5096 | #!/usr/bin/python
###########################################################################
# PreProcessing.py :
#
# Iterate through the expert labelmap and create 65x65 patches around the
# central pixel. All positive pixels are used as positive input cases.
# The same number of negatives is randomly picked. For each input patch,
# the corresponding filename and expected output are written to a text file
# and will be used later to create the database.
#
###########################################################################
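# Illustrative note (added; not part of the original script): each line written to
# the output text file pairs a patch filename with its expected label, e.g.
#   pat042_128_305.png 1
# where "pat042" is the image prefix, "128_305" is the patch centre (row_col) and
# the trailing 1/0 marks a positive (centerline) or negative sample. The prefix and
# coordinates above are made up for illustration.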
import os, glob, sys
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random, math
# Create image set and save expected output
def createImgSet( expertImg, inputImg, filenamePrefix, fileOutputDir,
textFilename ):
    w = 32  # Half patch width: extracted patches are (2*w+1) x (2*w+1) = 65x65 pixels
    count = 0  # Number of positive (centerline) input samples
    negativeImageIndex = []  # Background pixel indices (negative samples from the whole image)
    negativeIndex = []  # Vessel-boundary pixel indices (negative samples near vessels)
# Write filename and expected output
textFile = open(textFilename, "a")
textFile.truncate() # Erase file
    resample = 1  # WARNING: set > 1 to subsample pixels and reduce the training-set size when debugging
# Iterate through the expert label map
for i in range(0,expertImg.shape[0],resample):
for j in range(0,expertImg.shape[1],resample):
if j>w and j+w+1<inputImg.shape[1] :
if i>w and i+w+1<inputImg.shape[0]:
# Centerline pixel (positive)
if expertImg[i,j] > 0.5:
count = count + 1
filename = filenamePrefix + "_" + str(i) + "_" + str(j) +".png"
textFile.write(filename + " " + str(1) + "\n")
plt.imsave(fileOutputDir + filename, inputImg[i-w:i+w+1,j-w:j+w+1],cmap='Greys_r')
# Vessel bound pixel (negative)
elif expertImg[i,j] > 0:
negativeIndex.append([i,j])
# Background pixel (negative)
else :
negativeImageIndex.append([i,j])
# Pick random negatives from vessel bound
rndmNegativeInd = random.sample(negativeIndex, int(math.ceil(0.8*count)))
for [i,j] in rndmNegativeInd :
filename = filenamePrefix + "_" + str(i) + "_" + str(j) + ".png"
textFile.write(filename + " " + str(0) + "\n")
plt.imsave(fileOutputDir + filename, inputImg[i-w:i+w+1,j-w:j+w+1],cmap='Greys_r')
# Pick random negatives from the entire image
rndmNegativeImageInd = random.sample(negativeImageIndex, int(math.ceil(0.2*count)))
for [i,j] in rndmNegativeImageInd :
filename = filenamePrefix + "_" + str(i) + "_" + str(j) + ".png"
textFile.write(filename + " " + str(0) + "\n")
plt.imsave(fileOutputDir + filename, inputImg[i-w:i+w+1,j-w:j+w+1],cmap='Greys_r')
textFile.close()
print count
########
# Main #
########
# Path variable
hardDrive_root = "/media/lucas/krs0014/"
caffe_root = "./"
# Text file
trainFilename = caffe_root + "data/SegmentVesselsUsingNeuralNetworks/train.txt"
trainFile = open(trainFilename, "w+")
trainFile.truncate() # Erase file
trainFile.close()
valFilename = caffe_root + "data/SegmentVesselsUsingNeuralNetworks/val.txt"
valFile = open(valFilename, "w+")
valFile.truncate() # Erase file
valFile.close()
# Output patches directories
trainFileOutputDir= hardDrive_root + "SegmentVesselsUsingNeuralNetworks/training/out/"
valFileOutputDir = hardDrive_root + "SegmentVesselsUsingNeuralNetworks/testing/out/"
# Images directories
trainExpertDir = hardDrive_root + "SegmentVesselsUsingNeuralNetworks/training/expert/"
trainImgDir = hardDrive_root + "SegmentVesselsUsingNeuralNetworks/training/images/"
valExpertDir = hardDrive_root + "SegmentVesselsUsingNeuralNetworks/testing/expert/"
valImgDir = hardDrive_root + "SegmentVesselsUsingNeuralNetworks/testing/images/"
# Create train set
trainImages = glob.glob( os.path.join( trainImgDir, "*.png" ) )
for trainImage in trainImages:
print trainImage
# Get image ID
trainImagePrefix = os.path.basename(os.path.splitext(trainImage)[0])
# Set filename
trainExpertFilename = trainExpertDir + trainImagePrefix + "_expert.png"
trainImgFilename = trainImgDir + trainImagePrefix + ".png"
# Load images
trainExpert=mpimg.imread(trainExpertFilename)
#print trainExpert.shape
#trainExpert=trainExpert[:,:,0]
trainImg=mpimg.imread(trainImgFilename)
#trainImg=trainImg[:,:,0]
# Write images and text files
createImgSet( trainExpert, trainImg, trainImagePrefix, trainFileOutputDir, trainFilename )
#Create validation set
valImages = glob.glob( os.path.join( valImgDir, "*.png" ) )
for valImage in valImages:
print valImage
# Get image ID
valImagePrefix = os.path.basename(os.path.splitext(valImage)[0])
# Set filename
valExpertFilename = valExpertDir + valImagePrefix + "_expert.png"
valImgFilename = valImgDir + valImagePrefix + ".png"
# Load images
valExpert=mpimg.imread(valExpertFilename)
#valExpert=valExpert[:,:,0]
valImg=mpimg.imread(valImgFilename)
#valImg=valImg[:,:,0]
# Write images and text files
createImgSet( valExpert, valImg, valImagePrefix, valFileOutputDir, valFilename )
| apache-2.0 | 8,683,476,992,294,881,000 | 36.748148 | 98 | 0.683477 | false |
marc-sensenich/ansible | lib/ansible/plugins/callback/junit.py | 62 | 13564 | # (c) 2016 Matt Clay <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: junit
type: aggregate
short_description: write playbook output to a JUnit file.
version_added: historical
description:
- This callback writes playbook output to a JUnit formatted XML file.
- "Tasks show up in the report as follows:
'ok': pass
'failed' with 'EXPECTED FAILURE' in the task name: pass
'failed' with 'TOGGLE RESULT' in the task name: pass
'ok' with 'TOGGLE RESULT' in the task name: failure
'failed' due to an exception: error
'failed' for other reasons: failure
'skipped': skipped"
options:
output_dir:
name: JUnit output dir
default: ~/.ansible.log
description: Directory to write XML files to.
env:
- name: JUNIT_OUTPUT_DIR
task_class:
name: JUnit Task class
default: False
description: Configure the output to be one class per yaml file
env:
- name: JUNIT_TASK_CLASS
task_relative_path:
name: JUnit Task relative path
default: none
description: Configure the output to use relative paths to given directory
version_added: "2.8"
env:
- name: JUNIT_TASK_RELATIVE_PATH
fail_on_change:
name: JUnit fail on change
default: False
description: Consider any tasks reporting "changed" as a junit test failure
env:
- name: JUNIT_FAIL_ON_CHANGE
fail_on_ignore:
name: JUnit fail on ignore
default: False
description: Consider failed tasks as a junit test failure even if ignore_on_error is set
env:
- name: JUNIT_FAIL_ON_IGNORE
include_setup_tasks_in_report:
name: JUnit include setup tasks in report
default: True
description: Should the setup tasks be included in the final report
env:
- name: JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT
hide_task_arguments:
name: Hide the arguments for a task
default: False
description: Hide the arguments for a task
version_added: "2.8"
env:
- name: JUNIT_HIDE_TASK_ARGUMENTS
test_case_prefix:
name: Prefix to find actual test cases
default: <empty>
        description: Consider a task only as a test case if it has this value as prefix. Additionally, failing tasks are recorded as failed test cases.
version_added: "2.8"
env:
- name: JUNIT_TEST_CASE_PREFIX
requirements:
- whitelist in configuration
- junit_xml (python lib)
'''
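# Illustrative note (added; not part of the original plugin): per the "requirements"
# above, this callback must be whitelisted in the Ansible configuration before it
# runs, e.g. something like the following in ansible.cfg (the exact key name may
# vary between Ansible versions):
#
#   [defaults]
#   callback_whitelist = junit
#
# combined with the JUNIT_* environment variables documented above.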
import os
import time
import re
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.callback import CallbackBase
try:
from junit_xml import TestSuite, TestCase
HAS_JUNIT_XML = True
except ImportError:
HAS_JUNIT_XML = False
try:
from collections import OrderedDict
HAS_ORDERED_DICT = True
except ImportError:
try:
from ordereddict import OrderedDict
HAS_ORDERED_DICT = True
except ImportError:
HAS_ORDERED_DICT = False
class CallbackModule(CallbackBase):
"""
This callback writes playbook output to a JUnit formatted XML file.
Tasks show up in the report as follows:
'ok': pass
'failed' with 'EXPECTED FAILURE' in the task name: pass
'failed' with 'TOGGLE RESULT' in the task name: pass
'ok' with 'TOGGLE RESULT' in the task name: failure
'failed' due to an exception: error
'failed' for other reasons: failure
'skipped': skipped
This plugin makes use of the following environment variables:
JUNIT_OUTPUT_DIR (optional): Directory to write XML files to.
Default: ~/.ansible.log
JUNIT_TASK_CLASS (optional): Configure the output to be one class per yaml file
Default: False
JUNIT_TASK_RELATIVE_PATH (optional): Configure the output to use relative paths to given directory
Default: none
JUNIT_FAIL_ON_CHANGE (optional): Consider any tasks reporting "changed" as a junit test failure
Default: False
JUNIT_FAIL_ON_IGNORE (optional): Consider failed tasks as a junit test failure even if ignore_on_error is set
Default: False
JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT (optional): Should the setup tasks be included in the final report
Default: True
JUNIT_HIDE_TASK_ARGUMENTS (optional): Hide the arguments for a task
Default: False
    JUNIT_TEST_CASE_PREFIX (optional): Consider a task only as a test case if it has this value as prefix. Additionally, failing tasks are recorded as
                                       failed test cases.
Default: <empty>
Requires:
junit_xml
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'junit'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
self._task_class = os.getenv('JUNIT_TASK_CLASS', 'False').lower()
self._task_relative_path = os.getenv('JUNIT_TASK_RELATIVE_PATH', '')
self._fail_on_change = os.getenv('JUNIT_FAIL_ON_CHANGE', 'False').lower()
self._fail_on_ignore = os.getenv('JUNIT_FAIL_ON_IGNORE', 'False').lower()
self._include_setup_tasks_in_report = os.getenv('JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT', 'True').lower()
self._hide_task_arguments = os.getenv('JUNIT_HIDE_TASK_ARGUMENTS', 'False').lower()
self._test_case_prefix = os.getenv('JUNIT_TEST_CASE_PREFIX', '')
self._playbook_path = None
self._playbook_name = None
self._play_name = None
self._task_data = None
self.disabled = False
if not HAS_JUNIT_XML:
self.disabled = True
self._display.warning('The `junit_xml` python module is not installed. '
'Disabling the `junit` callback plugin.')
if HAS_ORDERED_DICT:
self._task_data = OrderedDict()
else:
self.disabled = True
self._display.warning('The `ordereddict` python module is not installed. '
'Disabling the `junit` callback plugin.')
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
def _start_task(self, task):
""" record the start of a task for one or more hosts """
uuid = task._uuid
if uuid in self._task_data:
return
play = self._play_name
name = task.get_name().strip()
path = task.get_path()
action = task.action
if not task.no_log and self._hide_task_arguments == 'false':
args = ', '.join(('%s=%s' % a for a in task.args.items()))
if args:
name += ' ' + args
self._task_data[uuid] = TaskData(uuid, name, path, play, action)
def _finish_task(self, status, result):
""" record the results of a task for a single host """
task_uuid = result._task._uuid
if hasattr(result, '_host'):
host_uuid = result._host._uuid
host_name = result._host.name
else:
host_uuid = 'include'
host_name = 'include'
task_data = self._task_data[task_uuid]
if self._fail_on_change == 'true' and status == 'ok' and result._result.get('changed', False):
status = 'failed'
# ignore failure if expected and toggle result if asked for
if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
status = 'ok'
elif 'TOGGLE RESULT' in task_data.name:
if status == 'failed':
status = 'ok'
elif status == 'ok':
status = 'failed'
if task_data.name.startswith(self._test_case_prefix) or status == 'failed':
task_data.add_host(HostData(host_uuid, host_name, status, result))
def _build_test_case(self, task_data, host_data):
""" build a TestCase from the given TaskData and HostData """
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
duration = host_data.finish - task_data.start
if self._task_relative_path:
junit_classname = os.path.relpath(task_data.path, self._task_relative_path)
else:
junit_classname = task_data.path
if self._task_class == 'true':
junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)
if host_data.status == 'included':
return TestCase(name, junit_classname, duration, host_data.result)
res = host_data.result._result
rc = res.get('rc', 0)
dump = self._dump_results(res, indent=0)
dump = self._cleanse_string(dump)
if host_data.status == 'ok':
return TestCase(name, junit_classname, duration, dump)
test_case = TestCase(name, junit_classname, duration)
if host_data.status == 'failed':
if 'exception' in res:
message = res['exception'].strip().split('\n')[-1]
output = res['exception']
test_case.add_error_info(message, output)
elif 'msg' in res:
message = res['msg']
test_case.add_failure_info(message, dump)
else:
test_case.add_failure_info('rc=%s' % rc, dump)
elif host_data.status == 'skipped':
if 'skip_reason' in res:
message = res['skip_reason']
else:
message = 'skipped'
test_case.add_skipped_info(message)
return test_case
def _cleanse_string(self, value):
""" convert surrogate escapes to the unicode replacement character to avoid XML encoding errors """
return to_text(to_bytes(value, errors='surrogateescape'), errors='replace')
def _generate_report(self):
""" generate a TestSuite report from the collected TaskData and HostData """
test_cases = []
for task_uuid, task_data in self._task_data.items():
if task_data.action == 'setup' and self._include_setup_tasks_in_report == 'false':
continue
for host_uuid, host_data in task_data.host_data.items():
test_cases.append(self._build_test_case(task_data, host_data))
test_suite = TestSuite(self._playbook_name, test_cases)
report = TestSuite.to_xml_string([test_suite])
output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
with open(output_file, 'wb') as xml:
xml.write(to_bytes(report, errors='surrogate_or_strict'))
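    # Note (added comment): the report written above lands in JUNIT_OUTPUT_DIR and
    # is named "<playbook name>-<unix timestamp>.xml", one file per playbook run.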
def v2_playbook_on_start(self, playbook):
self._playbook_path = playbook._file_name
self._playbook_name = os.path.splitext(os.path.basename(self._playbook_path))[0]
def v2_playbook_on_play_start(self, play):
self._play_name = play.get_name()
def v2_runner_on_no_hosts(self, task):
self._start_task(task)
def v2_playbook_on_task_start(self, task, is_conditional):
self._start_task(task)
def v2_playbook_on_cleanup_task_start(self, task):
self._start_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._start_task(task)
def v2_runner_on_failed(self, result, ignore_errors=False):
if ignore_errors and self._fail_on_ignore != 'true':
self._finish_task('ok', result)
else:
self._finish_task('failed', result)
def v2_runner_on_ok(self, result):
self._finish_task('ok', result)
def v2_runner_on_skipped(self, result):
self._finish_task('skipped', result)
def v2_playbook_on_include(self, included_file):
self._finish_task('included', included_file)
def v2_playbook_on_stats(self, stats):
self._generate_report()
class TaskData:
"""
Data about an individual task.
"""
def __init__(self, uuid, name, path, play, action):
self.uuid = uuid
self.name = name
self.path = path
self.play = play
self.start = None
self.host_data = OrderedDict()
self.start = time.time()
self.action = action
def add_host(self, host):
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
else:
raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
self.host_data[host.uuid] = host
class HostData:
"""
Data about an individual host.
"""
def __init__(self, uuid, name, status, result):
self.uuid = uuid
self.name = name
self.status = status
self.result = result
self.finish = time.time()
| gpl-3.0 | 1,691,005,793,983,737,300 | 35.462366 | 157 | 0.590976 | false |
hendrikx-itc/python-minerva | src/minerva/loading/csv/parser.py | 1 | 4403 | from operator import itemgetter
import csv
import datetime
from itertools import chain, islice
import dateutil.parser
from minerva.directory.entityref import entity_name_ref_class
from minerva.error import ConfigurationError
from minerva.harvest.plugin_api_trend import HarvestParserTrend
from minerva.storage.trend.trend import Trend
from minerva.storage.trend.datapackage import DataPackage, DataPackageType
from minerva.storage.trend.granularity import create_granularity
from minerva.storage.datatype import registry
DEFAULT_CHUNK_SIZE = 5000
DEFAULT_CONFIG = {
"timestamp": "timestamp",
"identifier": "entity",
"delimiter": ",",
"chunk_size": DEFAULT_CHUNK_SIZE
}
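# Illustrative (hypothetical) full configuration accepted by Parser below; the
# top-level keys and the per-column keys ("name", "data_type", "parser_config")
# are the ones read in load_packages, while the concrete values are made up:
#
#   {
#       "timestamp": "timestamp",
#       "identifier": "entity",
#       "delimiter": ",",
#       "chunk_size": 5000,
#       "entity_type": "node",
#       "granularity": "1 day",
#       "columns": [
#           {"name": "power", "data_type": "text", "parser_config": {"null_value": ""}}
#       ]
#   }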
class Parser(HarvestParserTrend):
def __init__(self, config):
if config is None:
self.config = DEFAULT_CONFIG
else:
self.config = config
def load_packages(self, stream, name):
csv_reader = csv.reader(stream, delimiter=self.config['delimiter'])
header = next(csv_reader)
timestamp_provider = is_timestamp_provider(header, self.config['timestamp'])
identifier_provider = is_identifier_provider(header, self.config['identifier'])
value_parsers = [
(
itemgetter(header.index(column['name'])),
registry[column['data_type']].string_parser(column.get('parser_config', {"null_value": ""})),
)
for column in self.config['columns']
]
trend_descriptors = [
Trend.Descriptor(column['name'], registry['text'], '')
for column in self.config['columns']
]
entity_type_name = self.config['entity_type']
granularity = create_granularity(self.config['granularity'])
entity_ref_type = entity_name_ref_class(entity_type_name)
def get_entity_type_name(data_package):
return entity_type_name
data_package_type = DataPackageType(
entity_type_name, entity_ref_type, get_entity_type_name
)
rows = (
(
identifier_provider(row),
timestamp_provider(row),
tuple(
parse_value(value_parser, get_value, row)
for get_value, value_parser in value_parsers
)
)
for row in csv_reader
)
chunk_size = self.config.get('chunk_size', DEFAULT_CHUNK_SIZE)
for chunk in chunked(rows, chunk_size):
yield DataPackage(
data_package_type, granularity,
trend_descriptors, chunk
)
def chunked(iterable, size: int):
"""
Return a generator of chunks (lists) of length size until
:param iterable: the iterable that will be chunked
:param size: the chunk size
:return:
"""
iterator = iter(iterable)
for first in iterator:
yield list(chain([first], islice(iterator, size - 1)))
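# Example (added for illustration): list(chunked(range(5), 2)) yields
# [[0, 1], [2, 3], [4]] -- every chunk has `size` elements except possibly the last.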
class ParseError(Exception):
pass
def parse_value(value_parser, get_value, row):
"""
Parse a value from a row and provide context if an error occurs.
:param value_parser:
:param get_value:
:param row:
:return:
"""
raw_value = get_value(row)
try:
value = value_parser(raw_value)
except Exception as exc:
raise ParseError(f"Error parsing value '{raw_value}': {exc}")
return value
def is_timestamp_provider(header, name):
if name == 'current_timestamp':
timestamp = datetime.datetime.now()
def f(*args):
return timestamp
return f
else:
if name not in header:
raise ConfigurationError(f"No column named '{name}' specified in header")
column_index = header.index(name)
def f(row):
value = row[column_index]
timestamp = dateutil.parser.parse(value)
return timestamp
return f
def is_identifier_provider(header, name):
if name not in header:
raise ConfigurationError(f"No column named '{name}' specified in header")
else:
return itemgetter(header.index(name))
class AliasRef:
def map_to_entity_ids(self, aliases):
def map_to(cursor):
return range(len(aliases))
return map_to
def fixed_type(name):
def get_type(*args, **kwargs):
return name
return get_type
| gpl-3.0 | 3,122,415,599,228,333,000 | 25.524096 | 109 | 0.614354 | false |
markroxor/gensim | gensim/test/test_keyedvectors.py | 1 | 4900 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <[email protected]>
# Copyright (C) 2017 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking the poincare module from the models package.
"""
import logging
import unittest
import numpy as np
from gensim.models.keyedvectors import EuclideanKeyedVectors
from gensim.test.utils import datapath
logger = logging.getLogger(__name__)
class TestEuclideanKeyedVectors(unittest.TestCase):
def setUp(self):
self.vectors = EuclideanKeyedVectors.load_word2vec_format(
datapath('euclidean_vectors.bin'), binary=True, datatype=np.float64)
def test_most_similar(self):
"""Test most_similar returns expected results."""
expected = [
'conflict',
'administration',
'terrorism',
'call',
'israel'
]
predicted = [result[0] for result in self.vectors.most_similar('war', topn=5)]
self.assertEqual(expected, predicted)
def test_most_similar_topn(self):
"""Test most_similar returns correct results when `topn` is specified."""
self.assertEqual(len(self.vectors.most_similar('war', topn=5)), 5)
self.assertEqual(len(self.vectors.most_similar('war', topn=10)), 10)
predicted = self.vectors.most_similar('war', topn=None)
self.assertEqual(len(predicted), len(self.vectors.vocab))
def test_most_similar_raises_keyerror(self):
"""Test most_similar raises KeyError when input is out of vocab."""
with self.assertRaises(KeyError):
self.vectors.most_similar('not_in_vocab')
def test_most_similar_restrict_vocab(self):
"""Test most_similar returns handles restrict_vocab correctly."""
expected = set(self.vectors.index2word[:5])
predicted = set(result[0] for result in self.vectors.most_similar('war', topn=5, restrict_vocab=5))
self.assertEqual(expected, predicted)
def test_most_similar_with_vector_input(self):
"""Test most_similar returns expected results with an input vector instead of an input word."""
expected = [
'war',
'conflict',
'administration',
'terrorism',
'call',
]
input_vector = self.vectors['war']
predicted = [result[0] for result in self.vectors.most_similar([input_vector], topn=5)]
self.assertEqual(expected, predicted)
def test_most_similar_to_given(self):
"""Test most_similar_to_given returns correct results."""
predicted = self.vectors.most_similar_to_given('war', ['terrorism', 'call', 'waging'])
self.assertEqual(predicted, 'terrorism')
def test_similar_by_word(self):
"""Test similar_by_word returns expected results."""
expected = [
'conflict',
'administration',
'terrorism',
'call',
'israel'
]
predicted = [result[0] for result in self.vectors.similar_by_word('war', topn=5)]
self.assertEqual(expected, predicted)
def test_similar_by_vector(self):
"""Test similar_by_word returns expected results."""
expected = [
'war',
'conflict',
'administration',
'terrorism',
'call',
]
input_vector = self.vectors['war']
predicted = [result[0] for result in self.vectors.similar_by_vector(input_vector, topn=5)]
self.assertEqual(expected, predicted)
def test_distance(self):
"""Test that distance returns expected values."""
self.assertTrue(np.allclose(self.vectors.distance('war', 'conflict'), 0.06694602))
self.assertEqual(self.vectors.distance('war', 'war'), 0)
def test_similarity(self):
"""Test similarity returns expected value for two words, and for identical words."""
self.assertTrue(np.allclose(self.vectors.similarity('war', 'war'), 1))
self.assertTrue(np.allclose(self.vectors.similarity('war', 'conflict'), 0.93305397))
def test_words_closer_than(self):
"""Test words_closer_than returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.words_closer_than('war', 'war'), [])
expected = set(['conflict', 'administration'])
self.assertEqual(set(self.vectors.words_closer_than('war', 'terrorism')), expected)
def test_rank(self):
"""Test rank returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.rank('war', 'war'), 1)
self.assertEqual(self.vectors.rank('war', 'terrorism'), 3)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| lgpl-2.1 | -8,816,928,352,855,255,000 | 37.582677 | 107 | 0.633061 | false |
Lilykos/invenio | invenio/modules/deposit/testsuite/helpers.py | 13 | 5355 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
from invenio.testsuite import InvenioTestCase
from flask import url_for
class DepositionTestCase(InvenioTestCase):
"""
Helper class for easier testing of deposition types.
"""
def register(self, deposition_type):
""" Register the deposition type """
from invenio.modules.deposit.registry import deposit_types
from invenio.modules.deposit.url_converters import refresh_url_map
deposit_types.register(deposition_type)
assert deposition_type in deposit_types
self.deposition_type = deposition_type
refresh_url_map(self.app)
def unregister(self):
""" Unregister an already registered deposition type """
from invenio.modules.deposit.registry import deposit_types
from invenio.modules.deposit.url_converters import refresh_url_map
deposit_types.unregister(self.deposition_type)
assert self.deposition_type not in deposit_types
self.deposition_type = None
refresh_url_map(self.app)
def clear(self, deposition_type):
"""
Remove all traces of the specified deposition type
"""
from invenio.modules.workflows.models import Workflow, \
BibWorkflowObject, BibWorkflowObjectLog, BibWorkflowEngineLog
from invenio.ext.sqlalchemy import db
workflow_ids = map(
lambda x: x.uuid,
Workflow.query.filter_by(
module_name='webdeposit', name=deposition_type
).all()
)
if workflow_ids:
obj_ids = map(
lambda x: x.id,
BibWorkflowObject.query.filter(
BibWorkflowObject.id_workflow.in_(workflow_ids)
).all()
)
db.session.commit()
if obj_ids:
BibWorkflowObjectLog.query.filter(
BibWorkflowObjectLog.id_object.in_(obj_ids)
).delete(synchronize_session=False)
BibWorkflowEngineLog.query.filter(
BibWorkflowEngineLog.id_object.in_(workflow_ids)
).delete(synchronize_session=False)
BibWorkflowObject.query.filter(
BibWorkflowObject.id.in_(obj_ids)
).delete(synchronize_session=False)
Workflow.query.filter(
Workflow.uuid.in_(workflow_ids)
).delete(synchronize_session=False)
db.session.commit()
def run_task_id(self, task_id):
""" Run a bibsched task """
from invenio.modules.scheduler.models import SchTASK
CFG_BINDIR = self.app.config['CFG_BINDIR']
bibtask = SchTASK.query.filter(SchTASK.id == task_id).first()
assert bibtask is not None
assert bibtask.status == 'WAITING'
cmd = "%s/%s %s" % (CFG_BINDIR, bibtask.proc, task_id)
assert not os.system(cmd)
def run_tasks(self, alias=None):
"""
Run all background tasks matching parameters
"""
from invenio.modules.scheduler.models import SchTASK
q = SchTASK.query
if alias:
q = q.filter(SchTASK.user == alias, SchTASK.status == 'WAITING')
for r in q.all():
self.run_task_id(r.id)
def run_deposition_tasks(self, deposition_id, with_webcoll=True):
"""
Run all task ids specified in the latest SIP and optionally run
webcoll.
"""
# Run submitted tasks
from invenio.modules.deposit.models import Deposition
dep = Deposition.get(deposition_id)
sip = dep.get_latest_sip(sealed=True)
for task_id in sip.task_ids:
self.run_task_id(task_id)
if with_webcoll:
# Run webcoll (to ensure record is assigned permissions)
from invenio.legacy.bibsched.bibtask import \
task_low_level_submission
task_id = task_low_level_submission('webcoll', 'webdeposit', '-q')
self.run_task_id(task_id)
# Check if record is accessible
response = self.client.get(
url_for('record.metadata', recid=sip.metadata['recid']),
base_url=self.app.config['CFG_SITE_SECURE_URL'],
)
self.assertStatus(response, 200)
def create(self, deposition_type):
""" Create a deposition and return is deposition id """
res = self.client.get(url_for(
'webdeposit.create', deposition_type=deposition_type,
))
assert res.status_code == 302
return res.location.split("/")[-1]
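    # Illustrative usage sketch (added comment; MyDepositionType and the test below
    # are hypothetical, not part of this module):
    #
    #   class MyDepositionTest(DepositionTestCase):
    #       def test_submit(self):
    #           self.register(MyDepositionType)       # register the type under test
    #           dep_id = self.create('mydeposition')  # returns the new deposition id
    #           self.run_deposition_tasks(dep_id)     # run the submitted bibsched tasks
    #           self.unregister()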
| gpl-2.0 | 4,993,247,379,498,359,000 | 34.463576 | 78 | 0.622035 | false |
Northrend/mxnet | example/speech_recognition/stt_datagenerator.py | 44 | 13027 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import json
import random
import numpy as np
from stt_utils import calc_feat_dim, spectrogram_from_file
from config_util import generate_file_path
from log_util import LogUtil
from label_util import LabelUtil
from stt_bi_graphemes_util import generate_bi_graphemes_label
from multiprocessing import cpu_count, Process, Manager
class DataGenerator(object):
def __init__(self, save_dir, model_name, step=10, window=20, max_freq=8000, desc_file=None):
"""
Params:
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
desc_file (str, optional): Path to a JSON-line file that contains
labels and paths to the audio files. If this is None, then
load metadata right away
"""
#calc_feat_dim returns int(0.001*window*max_freq)+1
super(DataGenerator, self).__init__()
# feat_dim=0.001*20*8000+1=161
self.feat_dim = calc_feat_dim(window, max_freq)
# 1d 161 length of array filled with zeros
self.feats_mean = np.zeros((self.feat_dim,))
# 1d 161 length of array filled with 1s
self.feats_std = np.ones((self.feat_dim,))
self.max_input_length = 0
self.max_length_list_in_batch = []
        # Random number generator, used to sample audio paths in sample_normalize()
self.rng = random.Random()
if desc_file is not None:
self.load_metadata_from_desc_file(desc_file)
self.step = step
self.window = window
self.max_freq = max_freq
self.save_dir = save_dir
self.model_name = model_name
def get_meta_from_file(self, feats_mean, feats_std):
self.feats_mean = feats_mean
self.feats_std = feats_std
def featurize(self, audio_clip, overwrite=False, save_feature_as_csvfile=False):
""" For a given audio clip, calculate the log of its Fourier Transform
Params:
audio_clip(str): Path to the audio clip
"""
return spectrogram_from_file(
audio_clip, step=self.step, window=self.window,
max_freq=self.max_freq, overwrite=overwrite,
save_feature_as_csvfile=save_feature_as_csvfile)
def load_metadata_from_desc_file(self, desc_file, partition='train',
max_duration=16.0,):
""" Read metadata from the description file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
max_duration (float): In seconds, the maximum duration of
utterances to train or test on
"""
logger = LogUtil().getlogger()
logger.info('Reading description file: {} for partition: {}'
.format(desc_file, partition))
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
if float(spec['duration']) > max_duration:
continue
audio_paths.append(spec['key'])
durations.append(float(spec['duration']))
texts.append(spec['text'])
except Exception as e:
# Change to (KeyError, ValueError) or
# (KeyError,json.decoder.JSONDecodeError), depending on
# json module version
logger.warn('Error reading line #{}: {}'
.format(line_num, json_line))
logger.warn(str(e))
if partition == 'train':
self.count = len(audio_paths)
self.train_audio_paths = audio_paths
self.train_durations = durations
self.train_texts = texts
elif partition == 'validation':
self.val_audio_paths = audio_paths
self.val_durations = durations
self.val_texts = texts
self.val_count = len(audio_paths)
elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
def load_train_data(self, desc_file, max_duration):
self.load_metadata_from_desc_file(desc_file, 'train', max_duration=max_duration)
def load_validation_data(self, desc_file, max_duration):
self.load_metadata_from_desc_file(desc_file, 'validation', max_duration=max_duration)
@staticmethod
def sort_by_duration(durations, audio_paths, texts):
return zip(*sorted(zip(durations, audio_paths, texts)))
def normalize(self, feature, eps=1e-14):
return (feature - self.feats_mean) / (self.feats_std + eps)
def get_max_label_length(self, partition, is_bi_graphemes=False):
if partition == 'train':
texts = self.train_texts + self.val_texts
elif partition == 'test':
texts = self.train_texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
if is_bi_graphemes:
self.max_label_length = max([len(generate_bi_graphemes_label(text)) for text in texts])
else:
self.max_label_length = max([len(text) for text in texts])
return self.max_label_length
def get_max_seq_length(self, partition):
if partition == 'train':
audio_paths = self.train_audio_paths + self.val_audio_paths
durations = self.train_durations + self.val_durations
elif partition == 'test':
audio_paths = self.train_audio_paths
durations = self.train_durations
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
max_duration_indexes = durations.index(max(durations))
max_seq_length = self.featurize(audio_paths[max_duration_indexes]).shape[0]
self.max_seq_length = max_seq_length
return max_seq_length
def prepare_minibatch(self, audio_paths, texts, overwrite=False,
is_bi_graphemes=False, seq_length=-1, save_feature_as_csvfile=False):
""" Featurize a minibatch of audio, zero pad them and return a dictionary
Params:
audio_paths (list(str)): List of paths to audio files
texts (list(str)): List of texts corresponding to the audio files
Returns:
dict: See below for contents
"""
assert len(audio_paths) == len(texts),\
"Inputs and outputs to the network must be of the same number"
# Features is a list of (timesteps, feature_dim) arrays
# Calculate the features for each audio clip, as the log of the
# Fourier Transform of the audio
features = [self.featurize(a, overwrite=overwrite, save_feature_as_csvfile=save_feature_as_csvfile) for a in audio_paths]
input_lengths = [f.shape[0] for f in features]
feature_dim = features[0].shape[1]
mb_size = len(features)
# Pad all the inputs so that they are all the same length
if seq_length == -1:
x = np.zeros((mb_size, self.max_seq_length, feature_dim))
else:
x = np.zeros((mb_size, seq_length, feature_dim))
y = np.zeros((mb_size, self.max_label_length))
labelUtil = LabelUtil.getInstance()
label_lengths = []
for i in range(mb_size):
feat = features[i]
feat = self.normalize(feat) # Center using means and std
x[i, :feat.shape[0], :] = feat
if is_bi_graphemes:
label = generate_bi_graphemes_label(texts[i])
label = labelUtil.convert_bi_graphemes_to_num(label)
y[i, :len(label)] = label
else:
label = labelUtil.convert_word_to_num(texts[i])
y[i, :len(texts[i])] = label
label_lengths.append(len(label))
return {
'x': x, # (0-padded features of shape(mb_size,timesteps,feat_dim)
'y': y, # list(int) Flattened labels (integer sequences)
'texts': texts, # list(str) Original texts
'input_lengths': input_lengths, # list(int) Length of each input
'label_lengths': label_lengths, # list(int) Length of each label
}
def iterate_test(self, minibatch_size=16):
return self.iterate(self.test_audio_paths, self.test_texts,
minibatch_size)
def iterate_validation(self, minibatch_size=16):
return self.iterate(self.val_audio_paths, self.val_texts,
minibatch_size)
def preprocess_sample_normalize(self, threadIndex, audio_paths, overwrite, return_dict):
if len(audio_paths) > 0:
audio_clip = audio_paths[0]
feat = self.featurize(audio_clip=audio_clip, overwrite=overwrite)
feat_squared = np.square(feat)
count = float(feat.shape[0])
dim = feat.shape[1]
if len(audio_paths) > 1:
for audio_path in audio_paths[1:]:
next_feat = self.featurize(audio_clip=audio_path, overwrite=overwrite)
next_feat_squared = np.square(next_feat)
feat_vertically_stacked = np.concatenate((feat, next_feat)).reshape(-1, dim)
feat = np.sum(feat_vertically_stacked, axis=0, keepdims=True)
feat_squared_vertically_stacked = np.concatenate(
(feat_squared, next_feat_squared)).reshape(-1, dim)
feat_squared = np.sum(feat_squared_vertically_stacked, axis=0, keepdims=True)
count += float(next_feat.shape[0])
return_dict[threadIndex] = {'feat': feat, 'feat_squared': feat_squared, 'count': count}
def sample_normalize(self, k_samples=1000, overwrite=False):
""" Estimate the mean and std of the features from the training set
Params:
k_samples (int): Use this number of samples for estimation
"""
log = LogUtil().getlogger()
log.info("Calculating mean and std from samples")
# if k_samples is negative then it goes through total dataset
if k_samples < 0:
audio_paths = self.audio_paths
# using sample
else:
k_samples = min(k_samples, len(self.train_audio_paths))
samples = self.rng.sample(self.train_audio_paths, k_samples)
audio_paths = samples
manager = Manager()
return_dict = manager.dict()
jobs = []
for threadIndex in range(cpu_count()):
proc = Process(target=self.preprocess_sample_normalize, args=(threadIndex, audio_paths, overwrite, return_dict))
jobs.append(proc)
proc.start()
for proc in jobs:
proc.join()
feat = np.sum(np.vstack([item['feat'] for item in return_dict.values()]), axis=0)
count = sum([item['count'] for item in return_dict.values()])
feat_squared = np.sum(np.vstack([item['feat_squared'] for item in return_dict.values()]), axis=0)
self.feats_mean = feat / float(count)
self.feats_std = np.sqrt(feat_squared / float(count) - np.square(self.feats_mean))
np.savetxt(
generate_file_path(self.save_dir, self.model_name, 'feats_mean'), self.feats_mean)
np.savetxt(
generate_file_path(self.save_dir, self.model_name, 'feats_std'), self.feats_std)
log.info("End calculating mean and std from samples")
| apache-2.0 | 8,733,191,609,085,622,000 | 45.691756 | 129 | 0.59891 | false |
govint/docker-volume-vsphere | esx_service/vmdk_ops.py | 3 | 83534 | #!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
ESX-side service handling VMDK requests from VMCI clients
The requests are JSON formatted.
All operations use the requesting VM's (docker host) datastore, and
"Name" in a request refers to the vmdk basename.
The VMDK name is formed as [vmdatastore] dockvols/"Name".vmdk
Commands ("cmd" in request):
"create" - create a VMDK in "[vmdatastore] dvol"
"remove" - remove a VMDK. We assume it's not open, and fail if it is
"list" - enumerate VMDKs
"get" - get info about an individual volume (vmdk)
"attach" - attach a VMDK to the requesting VM
"detach" - detach a VMDK from the requesting VM (assuming it's unmounted)
"version" - get the ESX service version string
'''
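# Illustrative request/path shapes (added comment, not part of the original source).
# Per the docstring above a request carries a "cmd" and a volume "Name"; any other
# field shown here (e.g. "Opts"/"size") is an assumption:
#
#   {"cmd": "create", "Name": "myvol", "Opts": {"size": "1gb"}}
#
# and for a volume "myvol" on datastore "datastore1" the backing disk would be
#   [datastore1] dockvols/myvol.vmdk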
import atexit
import getopt
import json
import logging
import os
import os.path
import re
import signal
import subprocess
import sys
import traceback
import time
from ctypes import *
from vmware import vsi
import pyVim
from pyVim.connect import Connect, Disconnect
from pyVim import vmconfig
from pyVmomi import VmomiSupport, vim, vmodl
from pyVmomi.VmomiSupport import newestVersions
sys.dont_write_bytecode = True
# Location of utils used by the plugin.
TOP_DIR = "/usr/lib/vmware/vmdkops"
BIN_LOC = os.path.join(TOP_DIR, "bin")
LIB_LOC = os.path.join(TOP_DIR, "lib")
LIB_LOC64 = os.path.join(TOP_DIR, "lib64")
PY_LOC = os.path.join(TOP_DIR, "Python")
PY2_LOC = os.path.join(PY_LOC, "2")
# We won't accept names longer than that
MAX_VOL_NAME_LEN = 100
MAX_DS_NAME_LEN = 100
# Characters not acceptable in volume name
ILLEGAL_CHARACTERS = {'/', '\\'}
# vmdkops python utils are in PY_LOC, so insert to path ahead of other stuff
sys.path.insert(0, PY_LOC)
# if we are on Python 2, add py2-only stuff as a fallback
if sys.version_info.major == 2:
sys.path.append(PY2_LOC)
import threading
import threadutils
import log_config
import volume_kv as kv
import vmdk_utils
import vsan_policy
import vsan_info
import auth
import sqlite3
import convert
import auth_data_const
import auth_api
import error_code
from error_code import ErrorCode
from error_code import error_code_to_message
import vm_listener
import counter
# Python version 3.5.1
PYTHON64_VERSION = 50659824
# External tools used by the plugin.
OBJ_TOOL_CMD = "/usr/lib/vmware/osfs/bin/objtool open -u "
OSFS_MKDIR_CMD = "/usr/lib/vmware/osfs/bin/osfs-mkdir -n "
# Defaults
DOCK_VOLS_DIR = "dockvols"  # placed in the same datastore as the Docker VM
MAX_JSON_SIZE = 1024 * 4 # max buf size for query json strings. Queries are limited in size
MAX_SKIP_COUNT = 16 # max retries on VMCI Get Ops failures
VMDK_ADAPTER_TYPE = 'busLogic' # default adapter type
# Protocol version understood by the server. If you change the client/server protocol used
# over VMCI, PLEASE DO NOT FORGET TO CHANGE IT FOR THE CLIENT in file <esx_vmdkcmd.go>!
SERVER_PROTOCOL_VERSION = 2
# Error codes
VMCI_ERROR = -1 # VMCI C code uses '-1' to indicate failures
ECONNABORTED = 103 # Error on non privileged client
# Volume data returned on Get request
CAPACITY = 'capacity'
SIZE = 'size'
ALLOCATED = 'allocated'
LOCATION = 'datastore'
CREATED_BY_VM = 'created by VM'
ATTACHED_TO_VM = 'attached to VM'
# Virtual machine power states
VM_POWERED_OFF = "poweredOff"
# Maximum number of PVSCSI targets
PVSCSI_MAX_TARGETS = 16
# Service instance provide from connection to local hostd
_service_instance = None
# VMCI library used to communicate with clients
lib = None
# For managing resource locks.
lockManager = threadutils.LockManager()
# Barrier indicating whether stop has been requested
stopBarrier = False
# Counter of operations in flight
opsCounter = counter.OpsCounter()
# Timeout (seconds) to wait for all in-flight ops to drain
WAIT_OPS_TIMEOUT = 20
# PCI bus and function number bits and mask, used on the slot number.
PCI_BUS_BITS = 5
PCI_BUS_MASK = 31
PCI_FUNC_BITS = 10
PCI_FUNC_MASK = 7
# Run executable on ESX as needed.
# Returns int with return value, and a string with either stdout (on success) or stderr (on error)
def RunCommand(cmd):
"""RunCommand
    Runs the command specified by the caller.
    @param cmd: command to execute
"""
logging.debug("Running cmd %s", cmd)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=True)
o, e = p.communicate()
s = p.returncode
if s != 0:
return (s, e)
return (s, o)
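# Example (added for illustration): rc, out = RunCommand("ls /vmfs/volumes") returns
# the exit status in rc and, per the comment above, stdout on success or stderr on
# failure in out.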
# returns error, or None for OK
# opts is dictionary of {option: value}.
# for now we care about size and (maybe) policy
def createVMDK(vmdk_path, vm_name, vol_name,
opts={}, vm_uuid=None, tenant_uuid=None, datastore_url=None, vm_datastore_url=None, vm_datastore=None):
logging.info("*** createVMDK: %s opts=%s vm_name=%s vm_uuid=%s tenant_uuid=%s datastore_url=%s",
vmdk_path, opts, vm_name, vm_uuid, tenant_uuid, datastore_url)
if os.path.isfile(vmdk_path):
# We are mostly here due to race or Plugin VMCI retry #1076
msg = "File {0} already exists".format(vmdk_path)
logging.warning(msg)
return err(msg)
try:
validate_opts(opts, vmdk_path)
except ValidationError as e:
return err(e.msg)
if kv.CLONE_FROM in opts:
return cloneVMDK(vm_name=vm_name,
vmdk_path=vmdk_path,
opts=opts,
vm_uuid=vm_uuid,
datastore_url=datastore_url,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if not kv.DISK_ALLOCATION_FORMAT in opts:
disk_format = kv.DEFAULT_ALLOCATION_FORMAT
# Update opts with DISK_ALLOCATION_FORMAT for volume metadata
opts[kv.DISK_ALLOCATION_FORMAT] = kv.DEFAULT_ALLOCATION_FORMAT
else:
disk_format = kv.VALID_ALLOCATION_FORMATS[opts[kv.DISK_ALLOCATION_FORMAT]]
# VirtualDiskSpec
vdisk_spec = vim.VirtualDiskManager.FileBackedVirtualDiskSpec()
vdisk_spec.adapterType = VMDK_ADAPTER_TYPE
vdisk_spec.diskType = disk_format
if kv.SIZE in opts:
vdisk_spec.capacityKb = convert.convert_to_KB(opts[kv.SIZE])
else:
vdisk_spec.capacityKb = convert.convert_to_KB(kv.DEFAULT_DISK_SIZE)
# Form datastore path from vmdk_path
volume_datastore_path = vmdk_utils.get_datastore_path(vmdk_path)
logging.debug("volume_datastore_path=%s", volume_datastore_path)
si = get_si()
task = si.content.virtualDiskManager.CreateVirtualDisk(
name=volume_datastore_path, spec=vdisk_spec)
try:
wait_for_tasks(si, [task])
except vim.fault.VimFault as ex:
return err("Failed to create volume: {0}".format(ex.msg))
logging.debug("Successfully created %s volume", vmdk_path)
# Handle vsan policy
if kv.VSAN_POLICY_NAME in opts:
# Attempt to set policy to vmdk
        # set_policy_to_vmdk() deletes the vmdk if the policy could not be set
set_err = set_policy_to_vmdk(vmdk_path=vmdk_path,
opts=opts,
vol_name=vol_name)
if set_err:
return set_err
if not create_kv_store(vm_name, vmdk_path, opts):
msg = "Failed to create metadata kv store for {0}".format(vmdk_path)
logging.warning(msg)
error_info = err(msg)
clean_err = cleanVMDK(vmdk_path=vmdk_path,
vol_name=vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
error_info = error_info + clean_err
return error_info
    # Create succeeded; insert the volume information into the "volumes" table
if tenant_uuid:
vol_size_in_MB = convert.convert_to_MB(auth.get_vol_size(opts))
auth.add_volume_to_volumes_table(tenant_uuid, datastore_url, vol_name, vol_size_in_MB)
else:
logging.debug(error_code_to_message[ErrorCode.VM_NOT_BELONG_TO_TENANT].format(vm_name))
def cloneVMDK(vm_name, vmdk_path, opts={}, vm_uuid=None, datastore_url=None, vm_datastore_url=None, vm_datastore=None):
logging.info("*** cloneVMDK: %s opts = %s vm_uuid=%s datastore_url=%s vm_datastore_url=%s vm_datastore=%s",
vmdk_path, opts, vm_uuid, datastore_url, vm_datastore_url, vm_datastore)
# Get source volume path for cloning
error_info, tenant_uuid, tenant_name = auth.get_tenant(vm_uuid)
if error_info:
return err(error_info)
try:
src_volume, src_datastore = parse_vol_name(opts[kv.CLONE_FROM])
except ValidationError as ex:
return err(str(ex))
if not src_datastore:
src_datastore_url = datastore_url
src_datastore = vmdk_utils.get_datastore_name(datastore_url)
elif not vmdk_utils.validate_datastore(src_datastore):
return err("Invalid datastore '%s'.\n" \
"Known datastores: %s.\n" \
"Default datastore_url: %s" \
% (src_datastore, ", ".join(get_datastore_names_list()), datastore_url))
else:
src_datastore_url = vmdk_utils.get_datastore_url(src_datastore)
error_info = authorize_check(vm_uuid=vm_uuid,
datastore_url=src_datastore_url,
datastore=src_datastore,
cmd=auth.CMD_ATTACH,
opts={},
use_default_ds=False,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if error_info:
errmsg = "Failed to authorize VM: {0}, datastore: {1}".format(error_info, src_datastore)
logging.warning("*** cloneVMDK: %s", errmsg)
return err(errmsg)
src_path, errMsg = get_vol_path(src_datastore, tenant_name)
if src_path is None:
return err("Failed to initialize source volume path {0}: {1}".format(src_path, errMsg))
src_vmdk_path = vmdk_utils.get_vmdk_path(src_path, src_volume)
logging.debug("cloneVMDK: src path=%s vol=%s vmdk_path=%s", src_path, src_volume, src_vmdk_path)
if not os.path.isfile(src_vmdk_path):
return err("Could not find volume for cloning %s" % opts[kv.CLONE_FROM])
# Form datastore path from vmdk_path
dest_vol = vmdk_utils.get_datastore_path(vmdk_path)
source_vol = vmdk_utils.get_datastore_path(src_vmdk_path)
lockname = "{}.{}.{}".format(src_datastore, tenant_name, src_volume)
with lockManager.get_lock(lockname):
# Verify if the source volume is in use.
attached, uuid, attach_as, attached_vm_name = getStatusAttached(src_vmdk_path)
if attached:
log_attached_volume(vmdk_path, uuid, attached_vm_name)
# Reauthorize with size info of the volume being cloned
src_vol_info = kv.get_vol_info(src_vmdk_path)
datastore = vmdk_utils.get_datastore_from_vmdk_path(vmdk_path)
datastore_url = vmdk_utils.get_datastore_url(datastore)
opts["size"] = src_vol_info["size"]
error_info = authorize_check(vm_uuid=vm_uuid,
datastore_url=datastore_url,
datastore=datastore,
cmd=auth.CMD_CREATE,
opts=opts,
use_default_ds=False,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if error_info:
return err(error_info)
# Handle the allocation format
if not kv.DISK_ALLOCATION_FORMAT in opts:
disk_format = kv.DEFAULT_ALLOCATION_FORMAT
# Update opts with DISK_ALLOCATION_FORMAT for volume metadata
opts[kv.DISK_ALLOCATION_FORMAT] = kv.DEFAULT_ALLOCATION_FORMAT
else:
disk_format = kv.VALID_ALLOCATION_FORMATS[opts[kv.DISK_ALLOCATION_FORMAT]]
# VirtualDiskSpec
vdisk_spec = vim.VirtualDiskManager.VirtualDiskSpec()
vdisk_spec.adapterType = VMDK_ADAPTER_TYPE
vdisk_spec.diskType = disk_format
# Clone volume
si = get_si()
task = si.content.virtualDiskManager.CopyVirtualDisk(
sourceName=source_vol, destName=dest_vol, destSpec=vdisk_spec)
try:
wait_for_tasks(si, [task])
except vim.fault.VimFault as ex:
return err("Failed to clone volume: {0}".format(ex.msg))
vol_name = vmdk_utils.strip_vmdk_extension(src_vmdk_path.split("/")[-1])
# Fix up the KV for the destination
if not kv.fixup_kv(src_vmdk_path, vmdk_path):
msg = ("Failed to create volume KV for %s" % vol_name)
logging.warning(msg)
error_info = err(msg)
clean_err = cleanVMDK(vmdk_path=vmdk_path, vol_name=vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
error_info = error_info + clean_err
return error_info
# Handle vsan policy
if kv.VSAN_POLICY_NAME in opts:
# Attempt to set policy to vmdk
        # set_policy_to_vmdk() deletes the vmdk if the policy could not be set
set_err = set_policy_to_vmdk(vmdk_path=vmdk_path,
opts=opts,
vol_name=vol_name)
if set_err:
return set_err
# Update volume meta
vol_meta = kv.getAll(vmdk_path)
vol_meta[kv.CREATED_BY] = vm_name
vol_meta[kv.CREATED] = time.asctime(time.gmtime())
vol_meta[kv.VOL_OPTS][kv.CLONE_FROM] = src_volume
vol_meta[kv.VOL_OPTS][kv.DISK_ALLOCATION_FORMAT] = opts[kv.DISK_ALLOCATION_FORMAT]
if kv.ACCESS in opts:
vol_meta[kv.VOL_OPTS][kv.ACCESS] = opts[kv.ACCESS]
if kv.ATTACH_AS in opts:
vol_meta[kv.VOL_OPTS][kv.ATTACH_AS] = opts[kv.ATTACH_AS]
if not kv.setAll(vmdk_path, vol_meta):
msg = "Failed to create metadata kv store for {0}".format(vmdk_path)
logging.warning(msg)
removeVMDK(vmdk_path)
return err(msg)
def create_kv_store(vm_name, vmdk_path, opts):
""" Create the metadata kv store for a volume """
vol_meta = {kv.STATUS: kv.DETACHED,
kv.VOL_OPTS: opts,
kv.CREATED: time.asctime(time.gmtime()),
kv.CREATED_BY: vm_name}
return kv.create(vmdk_path, vol_meta)
def validate_opts(opts, vmdk_path):
"""
Validate available options. Current options are:
* size - The size of the disk to create
* vsan-policy-name - The name of an existing policy to use
* diskformat - The allocation format of allocated disk
"""
valid_opts = [kv.SIZE, kv.VSAN_POLICY_NAME, kv.DISK_ALLOCATION_FORMAT,
kv.ATTACH_AS, kv.ACCESS, kv.FILESYSTEM_TYPE, kv.CLONE_FROM]
defaults = [kv.DEFAULT_DISK_SIZE, kv.DEFAULT_VSAN_POLICY,\
kv.DEFAULT_ALLOCATION_FORMAT, kv.DEFAULT_ATTACH_AS,\
kv.DEFAULT_ACCESS, kv.DEFAULT_FILESYSTEM_TYPE, kv.DEFAULT_CLONE_FROM]
invalid = frozenset(opts.keys()).difference(valid_opts)
if len(invalid) != 0:
msg = 'Invalid options: {0} \n'.format(list(invalid)) \
+ 'Valid options and defaults: ' \
+ '{0}'.format(list(zip(list(valid_opts), defaults)))
raise ValidationError(msg)
# For validation of clone (in)compatible options
clone = True if kv.CLONE_FROM in opts else False
if kv.SIZE in opts:
validate_size(opts[kv.SIZE], clone)
if kv.VSAN_POLICY_NAME in opts:
validate_vsan_policy_name(opts[kv.VSAN_POLICY_NAME], vmdk_path)
if kv.DISK_ALLOCATION_FORMAT in opts:
validate_disk_allocation_format(opts[kv.DISK_ALLOCATION_FORMAT])
if kv.ATTACH_AS in opts:
validate_attach_as(opts[kv.ATTACH_AS])
if kv.ACCESS in opts:
validate_access(opts[kv.ACCESS])
if kv.FILESYSTEM_TYPE in opts:
validate_fstype(opts[kv.FILESYSTEM_TYPE], clone)
def validate_size(size, clone=False):
"""
Ensure size is given in a human readable format <int><unit> where int is an
integer and unit is either 'mb', 'gb', or 'tb'. e.g. 22mb
"""
if clone:
raise ValidationError("Cannot define the size for a clone")
    if not size.lower().endswith(('mb', 'gb', 'tb')) or not size[:-2].isdigit():
msg = ('Invalid format for size. \n'
'Valid sizes must be of form X[mMgGtT]b where X is an '
'integer. Default = 100mb')
raise ValidationError(msg)
def validate_vsan_policy_name(policy_name, vmdk_path):
"""
Ensure that the policy file exists
"""
if not vsan_info.is_on_vsan(vmdk_path):
raise ValidationError('Cannot use a VSAN policy on a non-VSAN datastore')
if not vsan_policy.policy_exists(policy_name):
err_msg = 'Policy {0} does not exist.'.format(policy_name)
# If valid policies exist, append their names along with error message
# for available policy names that can be used
avail_policies = vsan_policy.get_policies()
if avail_policies:
avail_msg = ' Available policies are: {0}'.format(list(avail_policies.keys()))
err_msg = err_msg + avail_msg
raise ValidationError(err_msg)
def set_policy_to_vmdk(vmdk_path, opts, vol_name=None):
"""
Set VSAN policy to the vmdk object
If failed, delete the vmdk file and return the error info to be displayed
on client
"""
out = vsan_policy.set_policy_by_name(vmdk_path, opts[kv.VSAN_POLICY_NAME])
if out:
# If policy is incompatible/wrong, return the error and delete the vmdk_path
msg = ("Failed to create volume %s: %s" % (vol_name, out))
logging.warning(msg)
error_info = err(msg)
clean_err = cleanVMDK(vmdk_path=vmdk_path,
vol_name=vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
error_info = error_info + clean_err
return error_info
return None
def validate_disk_allocation_format(alloc_format):
"""
Ensure format is valid.
"""
if not alloc_format in kv.VALID_ALLOCATION_FORMATS :
raise ValidationError("Disk Allocation Format \'{0}\' is not supported."
" Valid options are: {1}.".format(
alloc_format, list(kv.VALID_ALLOCATION_FORMATS)))
def validate_attach_as(attach_type):
"""
Ensure that we recognize the attach type
"""
if not attach_type in kv.ATTACH_AS_TYPES :
raise ValidationError("Attach type '{0}' is not supported."
" Valid options are: {1}".format(attach_type, kv.ATTACH_AS_TYPES))
def validate_access(access_type):
"""
Ensure that we recognize the access type
"""
if not access_type in kv.ACCESS_TYPES :
raise ValidationError("Access type '{0}' is not supported."
" Valid options are: {1}".format(access_type,
kv.ACCESS_TYPES))
def validate_fstype(fstype, clone=False):
"""
Ensure that we don't accept fstype for a clone
"""
if clone:
raise ValidationError("Cannot define the filesystem type for a clone")
# Returns the UUID if the vmdk_path is for a VSAN-backed disk.
def get_vsan_uuid(vmdk_path):
f = open(vmdk_path)
data = f.read()
f.close()
# For now we look for a VSAN URI, later vvol.
exp = re.compile("RW .* VMFS \"vsan:\/\/(.*)\"")
try:
return exp.search(data).group(1)
except:
return None
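# Note (added comment): the regex in get_vsan_uuid() matches a vSAN extent line of
# the .vmdk descriptor; an illustrative line (UUID made up) looks like:
#   RW 2097152 VMFS "vsan://52f1a3b2-6f5c-1234-abcd-0123456789ab"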
# Return volume info
def vol_info(vol_meta, vol_size_info, datastore):
vinfo = {CREATED_BY_VM : vol_meta[kv.CREATED_BY],
kv.CREATED : vol_meta[kv.CREATED],
kv.STATUS : vol_meta[kv.STATUS]}
vinfo[CAPACITY] = {}
vinfo[CAPACITY][SIZE] = vol_size_info[SIZE]
vinfo[CAPACITY][ALLOCATED] = vol_size_info[ALLOCATED]
vinfo[LOCATION] = datastore
if kv.ATTACHED_VM_UUID in vol_meta:
vm_name = vm_uuid2name(vol_meta[kv.ATTACHED_VM_UUID])
if vm_name:
vinfo[ATTACHED_TO_VM] = vm_name
elif kv.ATTACHED_VM_NAME in vol_meta:
# If vm name couldn't be retrieved through uuid, use name from KV
vinfo[ATTACHED_TO_VM] = vol_meta[kv.ATTACHED_VM_NAME]
else:
vinfo[ATTACHED_TO_VM] = vol_meta[kv.ATTACHED_VM_UUID]
if kv.ATTACHED_VM_DEV in vol_meta:
vinfo[kv.ATTACHED_VM_DEV] = vol_meta[kv.ATTACHED_VM_DEV]
if kv.VOL_OPTS in vol_meta:
if kv.FILESYSTEM_TYPE in vol_meta[kv.VOL_OPTS]:
vinfo[kv.FILESYSTEM_TYPE] = vol_meta[kv.VOL_OPTS][kv.FILESYSTEM_TYPE]
if kv.VSAN_POLICY_NAME in vol_meta[kv.VOL_OPTS]:
vinfo[kv.VSAN_POLICY_NAME] = vol_meta[kv.VOL_OPTS][kv.VSAN_POLICY_NAME]
if kv.DISK_ALLOCATION_FORMAT in vol_meta[kv.VOL_OPTS]:
vinfo[kv.DISK_ALLOCATION_FORMAT] = vol_meta[kv.VOL_OPTS][kv.DISK_ALLOCATION_FORMAT]
else:
vinfo[kv.DISK_ALLOCATION_FORMAT] = kv.DEFAULT_ALLOCATION_FORMAT
if kv.ATTACH_AS in vol_meta[kv.VOL_OPTS]:
vinfo[kv.ATTACH_AS] = vol_meta[kv.VOL_OPTS][kv.ATTACH_AS]
else:
vinfo[kv.ATTACH_AS] = kv.DEFAULT_ATTACH_AS
if kv.ACCESS in vol_meta[kv.VOL_OPTS]:
vinfo[kv.ACCESS] = vol_meta[kv.VOL_OPTS][kv.ACCESS]
else:
vinfo[kv.ACCESS] = kv.DEFAULT_ACCESS
if kv.CLONE_FROM in vol_meta[kv.VOL_OPTS]:
vinfo[kv.CLONE_FROM] = vol_meta[kv.VOL_OPTS][kv.CLONE_FROM]
else:
vinfo[kv.CLONE_FROM] = kv.DEFAULT_CLONE_FROM
return vinfo
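# Rough shape of the dict returned by vol_info() (keys are shown symbolically
# because the string constants are defined elsewhere; values are hypothetical):
#   {CREATED_BY_VM: 'my-docker-vm', kv.CREATED: '<timestamp>', kv.STATUS: kv.DETACHED,
#    CAPACITY: {SIZE: '100MB', ALLOCATED: '15MB'}, LOCATION: 'datastore1',
#    kv.DISK_ALLOCATION_FORMAT: kv.DEFAULT_ALLOCATION_FORMAT,
#    kv.ATTACH_AS: kv.DEFAULT_ATTACH_AS, kv.ACCESS: kv.DEFAULT_ACCESS,
#    kv.CLONE_FROM: kv.DEFAULT_CLONE_FROM}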
def cleanVMDK(vmdk_path, vol_name=None):
"""
    Delete the vmdk file. Retry if the attempt fails.
    Invoked as part of the removeVMDK procedure and in cases
    requiring deletion of the vmdk file only (when the meta file
    hasn't been generated), e.g. an unsuccessful attempt to apply
    a VSAN policy, or a failure to create metadata for vmdk_path.
"""
logging.info("*** cleanVMDK: %s", vmdk_path)
# Form datastore path from vmdk_path
volume_datastore_path = vmdk_utils.get_datastore_path(vmdk_path)
retry_count = 0
vol_meta = kv.getAll(vmdk_path)
kv.delete(vmdk_path)
while True:
si = get_si()
task = si.content.virtualDiskManager.DeleteVirtualDisk(name=volume_datastore_path)
try:
# Wait for delete, exit loop on success
wait_for_tasks(si, [task])
break
except vim.fault.FileNotFound as ex:
logging.warning("*** removeVMDK: File not found error: %s", ex.msg)
return None
except vim.fault.VimFault as ex:
if retry_count == vmdk_utils.VMDK_RETRY_COUNT or "Error caused by file" not in ex.msg:
kv.create(vmdk_path, vol_meta)
return err("Failed to remove volume: {0}".format(ex.msg))
else:
logging.warning("*** removeVMDK: Retrying removal on error: %s", ex.msg)
vmdk_utils.log_volume_lsof(vol_name)
retry_count += 1
time.sleep(vmdk_utils.VMDK_RETRY_SLEEP)
return None
# Return error, or None for OK
def removeVMDK(vmdk_path, vol_name=None, vm_name=None, tenant_uuid=None, datastore_url=None):
"""
    Checks the status of the vmdk file using its meta file.
    If it is not attached, cleans (deletes) the vmdk file.
    If the clean succeeds, deletes the volume from the volume table.
"""
logging.info("*** removeVMDK: %s", vmdk_path)
# Check the current volume status
kv_status_attached, kv_uuid, attach_mode, attached_vm_name = getStatusAttached(vmdk_path)
if kv_status_attached:
if vol_name is None:
vol_name = vmdk_utils.get_volname_from_vmdk_path(vmdk_path)
logging.info("*** removeVMDK: %s is in use, volume = %s VM = %s VM-uuid = %s",
vmdk_path, vol_name, attached_vm_name, kv_uuid)
return err("Failed to remove volume {0}, in use by VM = {1}.".format(vol_name, attached_vm_name))
# Cleaning .vmdk file
clean_err = cleanVMDK(vmdk_path, vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
return clean_err
    # clean succeeded, remove information about this volume from the volumes table
if tenant_uuid:
error_info = auth.remove_volume_from_volumes_table(tenant_uuid, datastore_url, vol_name)
return error_info
elif not vm_name:
logging.debug(error_code_to_message[ErrorCode.VM_NOT_BELONG_TO_TENANT].format(vm_name))
return None
def getVMDK(vmdk_path, vol_name, datastore):
"""Checks if the volume exists, and returns error if it does not"""
# Note: will return more Volume info here, when Docker API actually accepts it
logging.debug("getVMDK: vmdk_path=%s vol_name=%s, datastore=%s", vmdk_path, vol_name, datastore)
file_exist = os.path.isfile(vmdk_path)
logging.debug("getVMDK: file_exist=%d", file_exist)
    if not file_exist:
return err("Volume {0} not found (file: {1})".format(vol_name, vmdk_path))
    # Return volume info - volume policy, size, allocated capacity, allocation
    # type, created-by, creation time.
try:
result = vol_info(kv.getAll(vmdk_path),
kv.get_vol_info(vmdk_path),
datastore)
except Exception as ex:
logging.error("Failed to get disk details for %s (%s)" % (vmdk_path, ex))
return None
return result
def listVMDK(tenant):
"""
Returns a list of volume names (note: may be an empty list).
Each volume name is returned as either `volume@datastore`, or just `volume`
for volumes on vm_datastore
"""
vmdk_utils.init_datastoreCache(force=True)
vmdks = vmdk_utils.get_volumes(tenant)
# build fully qualified vol name for each volume found
return [{u'Name': get_full_vol_name(x['filename'], x['datastore']),
u'Attributes': {}} \
for x in vmdks]
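# Example of the list handed back to Docker (volume and datastore names are
# hypothetical):
#   [{u'Name': u'vol1@datastore1', u'Attributes': {}},
#    {u'Name': u'vol2@datastore2', u'Attributes': {}}]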
def findVmByUuid(vm_uuid, is_vc_uuid=False):
"""
Find VM by vm_uuid.
is_vc_uuid should be true if vm_uuid is vc uuid, else it should be false.
Return VM managed object, reconnect if needed. Throws if connection fails twice.
Returns None if the uuid is not found
"""
si = get_si()
vm = si.content.searchIndex.FindByUuid(None, vm_uuid, True, is_vc_uuid)
return vm
def findVmByUuidChoice(bios_uuid, vc_uuid):
"""
Returns vm object based on either vc_uuid, or bios_uuid.
Returns None if failed to find.
"""
vm = None
if vc_uuid:
vm = findVmByUuid(vc_uuid, True)
if not vm: # either vc_uuid is not even passed, or we failed to find the VM by VC uuid:
if vc_uuid:
logging.info("Failed to find VM by VC UUID %s, trying BIOS UUID %s", vc_uuid, bios_uuid)
vm = findVmByUuid(bios_uuid, False)
if not vm: # can't find VM by VC or BIOS uuid
logging.error("Failed to find VM by BIOS UUID either.")
return None
logging.info("Found vm name='%s'", vm.config.name)
return vm
def vm_uuid2name(vm_uuid):
vm = findVmByUuidChoice(vm_uuid, vm_uuid)
if not vm or not vm.config:
return None
return vm.config.name
def attachVMDK(vmdk_path, vm_name, bios_uuid, vc_uuid):
return apply_action_VMDK(disk_attach, vmdk_path, vm_name, bios_uuid, vc_uuid)
def detachVMDK(vmdk_path, vm_name, bios_uuid, vc_uuid):
return apply_action_VMDK(disk_detach, vmdk_path, vm_name, bios_uuid, vc_uuid)
def apply_action_VMDK(action, vmdk_path, vm_name, bios_uuid, vc_uuid):
    # note: vc_uuid is the last parameter to avoid reworking tests which use
    # positional args and are not aware of vc_uuid
"""Finds the VM and applies action(path,vm_MO) to it.
Returns json reply from action to pass upstairs, or json with 'err'"""
logging.info("*** %s: VMDK %s to VM '%s' , bios uuid = %s, VC uuid=%s)",
action.__name__, vmdk_path, vm_name, bios_uuid, vc_uuid)
vm = findVmByUuidChoice(bios_uuid, vc_uuid)
vcuuid = 'None'
if vc_uuid:
vcuuid = vc_uuid
if not vm: # can't find VM by VC or BIOS uuid
return err("Failed to find VM object for %s (bios %s vc %s)" % (vm_name, bios_uuid, vcuuid))
if vm.config.name != vm_name:
logging.warning("vm_name from vSocket '%s' does not match VM object '%s' ", vm_name, vm.config.name)
return action(vmdk_path, vm)
def get_vol_path(datastore, tenant_name=None, create=True):
"""
    Check the existence of (and create if needed) the path for docker volume VMDKs.
    Returns either the path to the tenant-specific folder (if a tenant name is passed)
    or the path to dockvol.
"""
# If tenant_name is set to None, the folder for Docker
# volumes is created on <datastore>/DOCK_VOLS_DIR
# If tenant_name is set, the folder for Dock volume
# is created on <datastore>/DOCK_VOLS_DIR/tenant_uuid
# a symlink <datastore>/DOCK_VOLS_DIR/tenant_name will be created to point to
# path <datastore>/DOCK_VOLS_DIR/tenant_uuid
    # If the dock volume folder already exists,
    # the path returned contains the tenant name, not the UUID.
    # This is to make logs more readable; the OS will resolve this path
    # since a symlink named after tenant_name will already be present.
readable_path = path = dock_vol_path = os.path.join("/vmfs/volumes", datastore, DOCK_VOLS_DIR)
if tenant_name:
error_info, tenant = auth_api.get_tenant_from_db(tenant_name)
        if error_info:
            logging.error("get_vol_path: failed to find tenant info for tenant %s", tenant_name)
            path = dock_vol_path
        else:
            path = os.path.join(dock_vol_path, tenant.id)
            readable_path = os.path.join(dock_vol_path, tenant_name)
if os.path.isdir(path):
# If the readable_path exists then return, else return path with no symlinks
if os.path.exists(readable_path):
logging.debug("Found %s, returning", readable_path)
return readable_path, None
else:
logging.warning("Internal: Tenant name symlink not found for path %s", readable_path)
logging.debug("Found %s, returning", path)
return path, None
if not create:
# Return the readable path to caller without creating it.
logging.debug("Returning %s, path isn't created yet.", readable_path)
return readable_path, None
if not os.path.isdir(dock_vol_path):
# The osfs tools are usable for DOCK_VOLS_DIR on all datastores.
cmd = "{} '{}'".format(OSFS_MKDIR_CMD, dock_vol_path)
logging.info("Creating %s, running '%s'", dock_vol_path, cmd)
rc, out = RunCommand(cmd)
if rc != 0:
errMsg = "{0} creation failed - {1} on datastore {2}".format(DOCK_VOLS_DIR, os.strerror(rc), datastore)
logging.warning(errMsg)
return None, err(errMsg)
if tenant_name and not os.path.isdir(path):
        # Create the per-tenant folder (named by the tenant uuid) inside DOCK_VOLS_DIR on "datastore"
logging.info("Creating directory %s", path)
try:
os.mkdir(path)
except Exception as ex:
errMsg = "Failed to initialize volume path {} - {}".format(path, ex)
logging.warning(errMsg)
return None, err(errMsg)
        # create the symlink /vmfs/volumes/<datastore_name>/<DOCK_VOLS_DIR>/<tenant_name>
symlink_path = os.path.join(dock_vol_path, tenant_name)
if not os.path.isdir(symlink_path):
os.symlink(path, symlink_path)
logging.info("Symlink %s is created to point to path %s", symlink_path, path)
logging.info("Created %s", path)
return readable_path, None
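# Illustrative results (datastore and tenant names are hypothetical, and
# DOCK_VOLS_DIR is shown symbolically since its value is defined elsewhere):
#   get_vol_path("datastore1")            -> ("/vmfs/volumes/datastore1/<DOCK_VOLS_DIR>", None)
#   get_vol_path("datastore1", "tenant1") -> ("/vmfs/volumes/datastore1/<DOCK_VOLS_DIR>/tenant1", None)
# where the second path is a symlink to .../<DOCK_VOLS_DIR>/<tenant uuid>.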
def parse_vol_name(full_vol_name):
"""
Parses volume[@datastore] and returns (volume, datastore)
On parse errors raises ValidationError with syntax explanation
"""
    # Split the volume name into volume and (optional) datastore parts
try:
at = full_vol_name.rindex('@')
vol_name = full_vol_name[:at]
ds_name = full_vol_name[at + 1:]
except ValueError:
# '@' not found
vol_name = full_vol_name
ds_name = None
# Now block the '-NNNNN' volume names
#
# Caveat: we block '-NNNNNN' in end of volume name to make sure that volume
# name never conflicts with VMDK snapshot name (e.g. 'disk-000001.vmdk').
# Note that N is a digit and there are exactly 6 of them (hardcoded in ESXi)
# vmdk_utils.py:list_vmdks() explicitly relies on this assumption.
if re.match(vmdk_utils.SNAP_NAME_REGEXP, vol_name):
raise ValidationError("Volume names ending with '-NNNNNN' (where N is a digit) are not supported")
# Check if the volume name is too long
if len(vol_name) > MAX_VOL_NAME_LEN:
raise ValidationError("Volume name is too long (max len is {0})".format(MAX_VOL_NAME_LEN))
# Check if the volume name contains illegal characters
for c in ILLEGAL_CHARACTERS:
if c in vol_name:
raise ValidationError("Volume name contains illegal characters: {0}".format(c))
# Check if the datastore name is too long
if ds_name:
if len(ds_name) > MAX_DS_NAME_LEN:
raise ValidationError("Datastore name is too long (max len is {0})".format(MAX_DS_NAME_LEN))
# Find case-insensitive match for the datastore
matching_datastores = [d for d in get_datastore_names_list() if d.lower() == ds_name.lower()]
# Return error if more than one datastores found
if len(matching_datastores) > 1:
raise ValidationError("Found multiple datastores with same name (ignoring case difference): {0}".format(matching_datastores))
# Found exactly one match
if len(matching_datastores) == 1:
# On Linux this is a redundant op, but on Windows it corrects the case
ds_name = matching_datastores[0]
# If len(matching_datastores) == 0, it means the ds_name is invalid.
# This will be taken care of by follow-up validation logic.
# Return qualified volume name and datastore name
return vol_name, ds_name
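# Illustrative behaviour (volume and datastore names are hypothetical):
#   parse_vol_name("vol1@datastore1")  ->  ("vol1", "datastore1")
#   parse_vol_name("vol1")             ->  ("vol1", None)
#   parse_vol_name("disk-000001")      ->  raises ValidationError (name ends in '-NNNNNN')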
def get_full_vol_name(vmdk_name, datastore):
"""
    Forms the full volume name from the vmdk file name and datastore, as volume@datastore
"""
vol_name = vmdk_utils.strip_vmdk_extension(vmdk_name)
return "{0}@{1}".format(vol_name, datastore)
def datastore_path_exist(datastore_name):
""" Check whether path /vmfs/volumes/datastore_name" exist or not """
ds_path = os.path.join("/vmfs/volumes/", datastore_name)
return os.path.exists(ds_path)
def get_datastore_name(datastore_url):
""" Get datastore_name with given datastore_url """
logging.debug("get_datastore_name: datastore_url=%s", datastore_url)
datastore_name = vmdk_utils.get_datastore_name(datastore_url)
if datastore_name is None or not datastore_path_exist(datastore_name):
        # path /vmfs/volumes/datastore_name does not exist;
        # the likely reason is that the datastore_name obtained from the
        # datastore cache is stale (old name). Refresh the cache and try
        # again - the result may still be None.
logging.debug("get_datastore_name: datastore_name=%s path to /vmfs/volumes/datastore_name does not exist",
datastore_name)
vmdk_utils.init_datastoreCache(force=True)
datastore_name = vmdk_utils.get_datastore_name(datastore_url)
logging.debug("get_datastore_name: After refresh get datastore_name=%s", datastore_name)
return datastore_name
def authorize_check(vm_uuid, datastore_url, datastore, cmd, opts, use_default_ds, vm_datastore_url, vm_datastore):
"""
    Check whether the command from the VM can be executed on the datastore.
    Return None on success, or error_info if the command cannot be executed.
"""
if use_default_ds:
        # first check whether it has privilege to default_datastore;
        # privilege to default_datastore must always exist
error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=datastore_url,
vm_datastore_url=vm_datastore_url)
if error_info:
return error_info
else:
# user passed in volume with format vol@datastore
# check the privilege to that datastore
error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=datastore_url,
vm_datastore_url=vm_datastore_url)
        # no privilege exists for the given datastore;
        # if the given datastore is the same as vm_datastore
        # then we can check privilege against "_VM_DS";
        # if no privilege exists for "_VM_DS", or the given datastore is not the
        # same as vm_datastore, we need to check against "_ALL_DS"
if error_info == error_code_to_message[ErrorCode.PRIVILEGE_NO_PRIVILEGE]:
if datastore == vm_datastore:
error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=auth_data_const.VM_DS_URL,
vm_datastore_url=vm_datastore_url)
# privilege to "_VM_DS" exists, but authorize fails, return error_info
if error_info != error_code_to_message[ErrorCode.PRIVILEGE_NO_PRIVILEGE]:
return error_info
# privilege to "_VM_DS" does not exists or the given datastore is not the same as
# vm_datastore, check privilege against "_ALL_DS"
error_info, tenant_uuid, tenant_name =auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=auth_data_const.ALL_DS_URL,
vm_datastore_url=vm_datastore_url)
if error_info:
return error_info
return None
# gets the request, calculates the volume path, and calls the relevant handler
def executeRequest(vm_uuid, vm_name, config_path, cmd, full_vol_name, opts, vc_uuid=None):
"""
    Executes a <cmd> request issued from a VM.
    The request is about volume <full_vol_name> in format volume@datastore.
    If @datastore is omitted, the tenant's "default_datastore" is used if one is
    specified; otherwise the datastore where the VM resides is used.
    For the VM, the function gets vm_uuid, vm_name and config_path.
<opts> is a json options string blindly passed to a specific operation
Returns None (if all OK) or error string
"""
logging.debug("config_path=%s", config_path)
# get datastore the VM is running on
vm_datastore_url = vmdk_utils.get_datastore_url_from_config_path(config_path)
vm_datastore = get_datastore_name(vm_datastore_url)
logging.debug("executeRequest: vm_datastore = %s, vm_datastore_url = %s",
vm_datastore, vm_datastore_url)
error_info, tenant_uuid, tenant_name = auth.get_tenant(vm_uuid)
force_detach = False
if error_info:
# For "docker volume ls", in case of error from the plugin Docker prints a list of cached volume names,
# which is misleading. To avoid this, we replace error with an empty list. See Issue #990 for details.
if (cmd == "list") and (not tenant_uuid):
return []
# We need special handling for failure to find tenants in "detach".
        # get_tenant() will fail if the VM was in the default VM group and the latter
# got deleted to tighten security.
# Note: since admin_cli will block removing a VM with attached disks from named groups,
# this fix only impacts "default" vmgroup removal. See issue #1441.
elif (cmd =="detach"):
force_detach = True
else:
return err(error_info)
if force_detach:
# Special (ugly) patch for detaching from VMs where we can't find tenant
# (e.g. tenant definition was removed)
# The patch is ugly since the affected code is a bit convoluted and can benefit
# from refactoring.
        # The patch does the following: circumvents all the code for authentication and
        # vmdk path calculation, and instead finds a good match in the list of devices actually attached.
logging.warning("executeRequest: FORCE_DETACH vm_uuid=%s, vm_name=%s, full_volume_name=%s",
vm_uuid, vm_name, full_vol_name)
# For detach, we get full volume name from docker so it should always be valid.
try:
vol_name, datastore = parse_vol_name(full_vol_name)
logging.info("vol_name=%s, datastore=%s", vol_name, datastore)
except ValidationError as ex:
return err(str(ex))
# we use tenant name to form a unique lock name, so let's fake it
tenant_name = "__NoSuchTenant__"
        # Since we do not know the tenant, and thus cannot construct the
        # /vmfs/volumes/<datastore>/dockvols/<tenant> path, just look at the
        # attached devices for the best match.
vm = findVmByUuidChoice(vm_uuid, vc_uuid)
vmdk_path = vmdk_utils.get_attached_volume_path(vm, vol_name, datastore)
else:
# default_datastore must be set for tenant
error_info, default_datastore_url = auth_api.get_default_datastore_url(tenant_name)
if error_info:
return err(error_info.msg)
elif not default_datastore_url:
err_msg = error_code_to_message[ErrorCode.DS_DEFAULT_NOT_SET].format(tenant_name)
logging.warning(err_msg)
return err(err_msg)
# default_datastore could be a real datastore name or a hard coded one "_VM_DS"
default_datastore = get_datastore_name(default_datastore_url)
vcuuid = 'None'
if vc_uuid:
vcuuid = vc_uuid
logging.debug("executeRequest: vm uuid=%s VC uuid=%s name=%s, tenant_name=%s, default_datastore=%s",
vm_uuid, vcuuid, vm_name, tenant_name, default_datastore)
if cmd == "list":
threadutils.set_thread_name("{0}-nolock-{1}".format(vm_name, cmd))
# if default_datastore is not set, should return error
return listVMDK(tenant_name)
try:
vol_name, datastore = parse_vol_name(full_vol_name)
except ValidationError as ex:
return err(str(ex))
if datastore and not vmdk_utils.validate_datastore(datastore):
return err("Invalid datastore '%s'.\n" \
"Known datastores: %s.\n" \
"Default datastore: %s" \
% (datastore, ", ".join(get_datastore_names_list()), default_datastore))
if not datastore:
datastore_url = default_datastore_url
datastore = default_datastore
use_default_ds = True
else:
datastore_url = vmdk_utils.get_datastore_url(datastore)
use_default_ds = False
logging.debug("executeRequest: vm_uuid=%s, vm_name=%s, tenant_name=%s, tenant_uuid=%s, "
"default_datastore_url=%s datastore_url=%s",
vm_uuid, vm_name, tenant_uuid, tenant_name, default_datastore_url, datastore_url)
error_info = authorize_check(vm_uuid=vm_uuid,
datastore_url=datastore_url,
datastore=datastore,
cmd=cmd,
opts=opts,
use_default_ds=use_default_ds,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if error_info:
return err(error_info)
    # get_vol_path() needs a real datastore name passed in
if datastore == auth_data_const.VM_DS:
datastore = vm_datastore
        # set datastore_url to a real datastore_url;
        # createVMDK() and removeVMDK() need to be passed a real
        # datastore_url instead of the url of _VM_DS
datastore_url = vm_datastore_url
path, errMsg = get_vol_path(datastore, tenant_name)
logging.debug("executeRequest for tenant %s with path %s", tenant_name, path)
if path is None:
return errMsg
vmdk_path = vmdk_utils.get_vmdk_path(path, vol_name)
# Set up locking for volume operations.
# Lock name defaults to combination of DS,tenant name and vol name
lockname = "{}.{}.{}".format(vm_datastore, tenant_name, vol_name)
# Set thread name to vm_name-lockname
threadutils.set_thread_name("{0}-{1}".format(vm_name, lockname))
# Get a lock for the volume
logging.debug("Trying to acquire lock: %s", lockname)
with lockManager.get_lock(lockname):
logging.debug("Acquired lock: %s", lockname)
if cmd == "get":
response = getVMDK(vmdk_path, vol_name, datastore)
elif cmd == "create":
response = createVMDK(vmdk_path=vmdk_path,
vm_name=vm_name,
vm_uuid=vm_uuid,
vol_name=vol_name,
opts=opts,
tenant_uuid=tenant_uuid,
datastore_url=datastore_url,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
elif cmd == "remove":
response = removeVMDK(vmdk_path=vmdk_path,
vol_name=vol_name,
vm_name=vm_name,
tenant_uuid=tenant_uuid,
datastore_url=datastore_url)
# For attach/detach reconfigure tasks, hold a per vm lock.
elif cmd == "attach":
with lockManager.get_lock(vm_uuid):
response = attachVMDK(vmdk_path=vmdk_path, vm_name=vm_name,
bios_uuid=vm_uuid, vc_uuid=vc_uuid)
elif cmd == "detach":
with lockManager.get_lock(vm_uuid):
response = detachVMDK(vmdk_path=vmdk_path, vm_name=vm_name,
bios_uuid=vm_uuid, vc_uuid=vc_uuid)
else:
return err("Unknown command:" + cmd)
logging.debug("Released lock: %s", lockname)
return response
def connectLocalSi():
'''
Initialize a connection to the local SI
'''
global _service_instance
if not _service_instance:
try:
logging.info("Connecting to the local Service Instance as 'dcui' ")
# Connect to local server as user "dcui" since this is the Admin that does not lose its
# Admin permissions even when the host is in lockdown mode. User "dcui" does not have a
# password - it is used by local application DCUI (Direct Console User Interface)
# Version must be set to access newer features, such as VSAN.
_service_instance = pyVim.connect.Connect(
host='localhost',
user='dcui',
version=newestVersions.Get('vim'))
except Exception as e:
logging.exception("Failed to create the local Service Instance as 'dcui', continuing... : ")
return
# set out ID in context to be used in request - so we'll see it in logs
reqCtx = VmomiSupport.GetRequestContext()
reqCtx["realUser"] = 'dvolplug'
atexit.register(pyVim.connect.Disconnect, _service_instance)
def get_si():
'''
Return a connection to the local SI
'''
with lockManager.get_lock('siLock'):
global _service_instance
try:
_service_instance.CurrentTime()
except:
# service_instance is invalid (could be stale)
# reset it to None and try to connect again.
_service_instance = None
connectLocalSi()
return _service_instance
def is_service_available():
"""
Check if connection to hostd service is available
"""
if not get_si():
return False
return True
def get_datastore_names_list():
"""returns names of known datastores"""
return [i[0] for i in vmdk_utils.get_datastores()]
def findDeviceByPath(vmdk_path, vm):
logging.debug("findDeviceByPath: Looking for device {0}".format(vmdk_path))
for d in vm.config.hardware.device:
if type(d) != vim.vm.device.VirtualDisk:
continue
# Disks of all backing have a backing object with a filename attribute.
# The filename identifies the virtual disk by name and can be used
# to match with the given volume name.
# Filename format is as follows:
# "[<datastore name>] <parent-directory>/tenant/<vmdk-descriptor-name>"
logging.debug("d.backing.fileName %s", d.backing.fileName)
ds, disk_path = d.backing.fileName.rsplit("]", 1)
datastore = ds[1:]
backing_disk = disk_path.lstrip()
logging.debug("findDeviceByPath: datastore=%s, backing_disk=%s", datastore, backing_disk)
# Construct the parent dir and vmdk name, resolving
# links if any.
dvol_dir = os.path.dirname(vmdk_path)
datastore_prefix = os.path.realpath(os.path.join("/vmfs/volumes", datastore)) + '/'
real_vol_dir = os.path.realpath(dvol_dir).replace(datastore_prefix, "")
virtual_disk = os.path.join(real_vol_dir, os.path.basename(vmdk_path))
logging.debug("dvol_dir=%s datastore_prefix=%s real_vol_dir=%s", dvol_dir, datastore_prefix,real_vol_dir)
logging.debug("backing_disk=%s virtual_disk=%s", backing_disk, virtual_disk)
if virtual_disk == backing_disk:
logging.debug("findDeviceByPath: MATCH: %s", backing_disk)
return d
return None
# Find the PCI slot number
def get_controller_pci_slot(vm, pvscsi, key_offset):
''' Return PCI slot number of the given PVSCSI controller
Input parameters:
vm: VM configuration
pvscsi: given PVSCSI controller
key_offset: offset from the bus number, controller_key - key_offset
is equal to the slot number of this given PVSCSI controller
'''
if pvscsi.slotInfo:
slot_num = pvscsi.slotInfo.pciSlotNumber
else:
        # The slot number is obtained from the VM config.
key = 'scsi{0}.pciSlotNumber'.format(pvscsi.key -
key_offset)
slot = [cfg for cfg in vm.config.extraConfig \
if cfg.key.lower() == key.lower()]
# If the given controller exists
if slot:
slot_num = slot[0].value
else:
return None
# Check if the PCI slot is on the primary or secondary bus
# and find the slot number for the bridge on the secondary
# bus.
orig_slot_num = slot_num
bus = (int(slot_num) >> PCI_BUS_BITS) & PCI_BUS_MASK
func = (int(slot_num) >> PCI_FUNC_BITS) & PCI_FUNC_MASK
while bus > 0:
bus = bus - 1
# Get PCI bridge slot number
key = 'pciBridge{0}.pciSlotNumber'.format(bus)
bridge_slot = [cfg for cfg in vm.config.extraConfig \
if cfg.key.lower() == key.lower()]
if bridge_slot:
slot_num = bridge_slot[0].value
else:
# We didn't find a PCI bridge for this bus.
return None
bus = (int(slot_num) >> PCI_BUS_BITS) & PCI_BUS_MASK
bus_num = '{0}.{1}'.format(hex(int(slot_num))[2:], func)
return [str(orig_slot_num), bus_num]
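# Worked sketch of the return value (the numeric slot value is made up, and the
# exact bus/function decoding depends on the PCI_* constants defined elsewhere):
#   for a controller whose pciSlotNumber is 192 and decodes to bus 0, function 0,
#   the result is ['192', 'c0.0'], i.e. the original slot number as a string plus
#   '<slot in hex>.<function>'.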
def dev_info(unit_number, pci_bus_slot_number):
'''Return a dictionary with Unit/Bus for the vmdk (or error)'''
return {'Unit': str(unit_number),
'ControllerPciSlotNumber': pci_bus_slot_number[0],
'ControllerPciBusNumber': pci_bus_slot_number[1]}
def reset_vol_meta(vmdk_path):
'''Clears metadata for vmdk_path'''
vol_meta = kv.getAll(vmdk_path)
if not vol_meta:
vol_meta = {}
logging.debug("Reseting meta-data for disk=%s", vmdk_path)
if set(vol_meta.keys()) & {kv.STATUS, kv.ATTACHED_VM_UUID}:
logging.debug("Old meta-data for %s was (status=%s VM uuid=%s)",
vmdk_path, vol_meta[kv.STATUS],
vol_meta[kv.ATTACHED_VM_UUID])
vol_meta[kv.STATUS] = kv.DETACHED
vol_meta[kv.ATTACHED_VM_UUID] = None
vol_meta[kv.ATTACHED_VM_NAME] = None
if not kv.setAll(vmdk_path, vol_meta):
msg = "Failed to save volume metadata for {0}.".format(vmdk_path)
logging.warning("reset_vol_meta: " + msg)
return err(msg)
def setStatusAttached(vmdk_path, vm, vm_dev_info=None):
    '''Sets metadata for vmdk_path to (attached, attachedToVM=uuid)'''
logging.debug("Set status=attached disk=%s VM name=%s uuid=%s", vmdk_path,
vm.config.name, vm.config.uuid)
vol_meta = kv.getAll(vmdk_path)
if not vol_meta:
vol_meta = {}
vol_meta[kv.STATUS] = kv.ATTACHED
vol_meta[kv.ATTACHED_VM_UUID] = vm.config.instanceUuid
vol_meta[kv.ATTACHED_VM_NAME] = vm.config.name
if vm_dev_info:
vol_meta[kv.ATTACHED_VM_DEV] = vm_dev_info
if not kv.setAll(vmdk_path, vol_meta):
logging.warning("Attach: Failed to save Disk metadata for %s", vmdk_path)
def setStatusDetached(vmdk_path, key=None, value=None):
'''Sets metadata for vmdk_path to "detached"'''
logging.debug("Set status=detached disk=%s", vmdk_path)
vol_meta = kv.getAll(vmdk_path)
if not vol_meta:
vol_meta = {}
vol_meta[kv.STATUS] = kv.DETACHED
# If attachedVMName is present, so is attachedVMUuid
try:
del vol_meta[kv.ATTACHED_VM_UUID]
del vol_meta[kv.ATTACHED_VM_NAME]
del vol_meta[kv.ATTACHED_VM_DEV]
except:
pass
if not kv.setAll(vmdk_path, vol_meta, key, value):
logging.warning("Detach: Failed to save Disk metadata for %s", vmdk_path)
def getStatusAttached(vmdk_path):
'''
Returns (attached, uuid, attach_as, vm_name) tuple. For 'detached' status
uuid and vm_name are None.
'''
vol_meta = kv.getAll(vmdk_path)
try:
attach_as = vol_meta[kv.VOL_OPTS][kv.ATTACH_AS]
except:
attach_as = kv.DEFAULT_ATTACH_AS
if not vol_meta or kv.STATUS not in vol_meta:
return False, None, attach_as, None
attached = (vol_meta[kv.STATUS] == kv.ATTACHED)
try:
uuid = vol_meta[kv.ATTACHED_VM_UUID]
except:
uuid = None
try:
vm_name = vol_meta[kv.ATTACHED_VM_NAME]
except:
vm_name = None
return attached, uuid, attach_as, vm_name
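# Illustrative return values (the uuid and VM name are hypothetical):
#   attached volume:  (True, '564dac12-b1a0-f735-0df3-bceb00b30340',
#                      <attach mode saved in KV>, 'my-docker-vm')
#   detached volume with no attach-as option saved:
#                     (False, None, kv.DEFAULT_ATTACH_AS, None)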
def log_attached_volume(vmdk_path, kv_uuid, vol_name):
'''
    Log an appropriate message for a volume that is already attached.
'''
# Treat kv_uuid as vc uuid to find VM
cur_vm = findVmByUuid(kv_uuid, True)
if not cur_vm:
# Prior to #1526, uuid in KV is bios uuid.
logging.info("Using %s as BIOS uuid to find the VM", kv_uuid)
cur_vm = findVmByUuid(kv_uuid, False)
if cur_vm:
msg = "Disk {0} is already attached to VM {1}".format(vmdk_path,
cur_vm.config.name)
else:
msg = "Failed to find VM {0}({1}), disk {2} is already attached".format(vol_name,
kv_uuid,
vmdk_path)
logging.warning(msg)
def add_pvscsi_controller(vm, controllers, max_scsi_controllers, offset_from_bus_number):
'''
Add a new PVSCSI controller, return (controller_key, err) pair
'''
# find empty bus slot for the controller:
taken = set([c.busNumber for c in controllers])
avail = set(range(0, max_scsi_controllers)) - taken
key = avail.pop() # bus slot
controller_key = key + offset_from_bus_number
disk_slot = 0
controller_spec = vim.VirtualDeviceConfigSpec(
operation='add',
device=vim.ParaVirtualSCSIController(key=controller_key,
busNumber=key,
sharedBus='noSharing', ), )
    # the device-change spec content goes here
pvscsi_change = []
pvscsi_change.append(controller_spec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = pvscsi_change
try:
si = get_si()
wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
except vim.fault.VimFault as ex:
msg=("Failed to add PVSCSI Controller: %s", ex.msg)
return None, err(msg)
logging.debug("Added a PVSCSI controller, controller_id=%d", controller_key)
return controller_key, None
def find_disk_slot_in_controller(vm, devices, pvsci, idx, offset_from_bus_number):
'''
Find an empty disk slot in the given controller, return disk_slot if an empty slot
can be found, otherwise, return None
'''
disk_slot = None
controller_key = pvsci[idx].key
taken = set([dev.unitNumber
for dev in devices
if type(dev) == vim.VirtualDisk and dev.controllerKey ==
controller_key])
# search in 15 slots, with unit_number 7 reserved for scsi controller
avail_slots = (set(range(0, 7)) | set(range(8, PVSCSI_MAX_TARGETS))) - taken
logging.debug("idx=%d controller_key=%d avail_slots=%d", idx, controller_key, len(avail_slots))
if len(avail_slots) != 0:
disk_slot = avail_slots.pop()
pci_slot_number = get_controller_pci_slot(vm, pvsci[idx],
offset_from_bus_number)
logging.debug("Find an available slot: controller_key = %d slot = %d", controller_key, disk_slot)
else:
logging.warning("No available slot in this controller: controller_key = %d", controller_key)
return disk_slot
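# Example (hypothetical): if the controller already uses unit numbers {0, 1, 8},
# the candidate set is ({0..6} | {8..PVSCSI_MAX_TARGETS-1}) - {0, 1, 8}, so any of
# the remaining slots (e.g. 2) may be returned; unit number 7 is never offered.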
def find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number):
'''
    Iterate through all the existing PVSCSI controllers attached to a VM to find an empty
    disk slot. Return disk_slot if an empty slot can be found; otherwise return None.
'''
idx = 0
disk_slot = None
while ((disk_slot is None) and (idx < len(pvsci))):
disk_slot = find_disk_slot_in_controller(vm, devices, pvsci, idx, offset_from_bus_number)
if (disk_slot is None):
            idx += 1
return idx, disk_slot
def disk_attach(vmdk_path, vm):
'''
    Attaches an *existing* disk to a VM on a PVSCSI controller
    (we need PVSCSI to avoid SCSI rescans in the guest).
    Returns an error, or the unit:bus numbers of the newly attached disk.
'''
kv_status_attached, kv_uuid, attach_mode, attached_vm_name = getStatusAttached(vmdk_path)
logging.info("Attaching {0} as {1}".format(vmdk_path, attach_mode))
if kv_status_attached:
log_attached_volume(vmdk_path, kv_uuid, attached_vm_name)
# NOTE: vSphere is very picky about unit numbers and controllers of virtual
# disks. Every controller supports 15 virtual disks, and the unit
# numbers need to be unique within the controller and range from
# 0 to 15 with 7 being reserved (for older SCSI controllers).
# It is up to the API client to add controllers as needed.
# SCSI Controller keys are in the range of 1000 to 1003 (1000 + bus_number).
offset_from_bus_number = 1000
max_scsi_controllers = 4
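    # Worked example (hypothetical): a PVSCSI controller on bus 2 gets
    # controller key 1000 + 2 = 1002, and a disk attached to it at unit 5 is
    # later reported by dev_info() as {'Unit': '5', ...}.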
devices = vm.config.hardware.device
# get all scsi controllers (pvsci, lsi logic, whatever)
controllers = [d for d in devices
if isinstance(d, vim.VirtualSCSIController)]
# Check if this disk is already attached, and if it is - skip the disk
# attach and the checks on attaching a controller if needed.
device = findDeviceByPath(vmdk_path, vm)
if device:
# Disk is already attached.
logging.warning("Disk %s already attached. VM=%s",
vmdk_path, vm.config.uuid)
setStatusAttached(vmdk_path, vm)
# Get that controller to which the device is configured for
pvsci = [d for d in controllers
if type(d) == vim.ParaVirtualSCSIController and
d.key == device.controllerKey]
return dev_info(device.unitNumber,
get_controller_pci_slot(vm, pvsci[0],
offset_from_bus_number))
# Disk isn't attached, make sure we have a PVSCI and add it if we don't
# check if we already have a pvsci one
pvsci = [d for d in controllers
if type(d) == vim.ParaVirtualSCSIController]
disk_slot = None
if len(pvsci) > 0:
        idx, disk_slot = find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number)
if (disk_slot is not None):
controller_key = pvsci[idx].key
pci_slot_number = get_controller_pci_slot(vm, pvsci[idx],
offset_from_bus_number)
logging.debug("Find an available disk slot, controller_key=%d, slot_id=%d",
controller_key, disk_slot)
if (disk_slot is None):
disk_slot = 0 # starting on a fresh controller
if len(controllers) >= max_scsi_controllers:
msg = "Failed to place new disk - The maximum number of supported volumes has been reached."
logging.error(msg + " VM=%s", vm.config.uuid)
return err(msg)
logging.info("Adding a PVSCSI controller")
controller_key, ret_err = add_pvscsi_controller(vm, controllers, max_scsi_controllers,
offset_from_bus_number)
if (ret_err):
return ret_err
# Find the controller just added
devices = vm.config.hardware.device
pvsci = [d for d in devices
if type(d) == vim.ParaVirtualSCSIController and
d.key == controller_key]
pci_slot_number = get_controller_pci_slot(vm, pvsci[0],
offset_from_bus_number)
logging.info("Added a PVSCSI controller, controller_key=%d pci_slot_number=%s",
controller_key, pci_slot_number[0])
# add disk as independent, so it won't be snapshotted with the Docker VM
disk_spec = vim.VirtualDeviceConfigSpec(
operation='add',
device=
vim.VirtualDisk(backing=vim.VirtualDiskFlatVer2BackingInfo(
fileName="[] " + vmdk_path,
diskMode=attach_mode, ),
deviceInfo=vim.Description(
# TODO: use docker volume name here. Issue #292
label="dockerDataVolume",
summary="dockerDataVolume", ),
unitNumber=disk_slot,
controllerKey=controller_key, ), )
disk_changes = []
disk_changes.append(disk_spec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = disk_changes
try:
si = get_si()
wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
except vim.fault.VimFault as ex:
msg = ex.msg
# Use metadata (KV) for extra logging
if kv_status_attached:
            # KV claims we are attached to a different VM.
cur_vm = vm_uuid2name(kv_uuid)
if not cur_vm:
cur_vm = attached_vm_name
msg += " disk {0} already attached to VM={1}".format(vmdk_path,
cur_vm)
if kv_uuid == vm.config.uuid:
msg += "(Current VM)"
return err(msg)
vm_dev_info = dev_info(disk_slot, pci_slot_number)
setStatusAttached(vmdk_path, vm, vm_dev_info)
logging.info("Disk %s successfully attached. controller pci_slot_number=%s, disk_slot=%d",
vmdk_path, pci_slot_number[0], disk_slot)
return vm_dev_info
def err(string):
return {u'Error': string}
def disk_detach(vmdk_path, vm):
"""detach disk (by full path) from a vm and return None or err(msg)"""
device = findDeviceByPath(vmdk_path, vm)
if not device:
        # Could happen if the disk is attached to a different VM: attach fails
        # and Docker insists on sending "unmount/detach", which also fails.
        # Or the plugin is retrying the operation due to socket errors (#1076).
        # Return success since the disk is not attached anyway.
logging.warning("*** Detach disk={0} not found. VM={1}".format(
vmdk_path, vm.config.uuid))
return None
return disk_detach_int(vmdk_path, vm, device)
def disk_detach_int(vmdk_path, vm, device, key=None, value=None):
"""
    Disk detach implementation. We get here after all validations are done,
    and simply connect to ESX and execute a Reconfig("remove disk") task.
"""
si = get_si()
spec = vim.vm.ConfigSpec()
dev_changes = []
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
disk_spec.device = device
dev_changes.append(disk_spec)
spec.deviceChange = dev_changes
try:
wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
    except vim.fault.VimFault as ex:
ex_type, ex_value, ex_traceback = sys.exc_info()
msg = "Failed to detach %s: %s" % (vmdk_path, ex.msg)
logging.warning("%s\n%s", msg, "".join(traceback.format_tb(ex_traceback)))
return err(msg)
setStatusDetached(vmdk_path, key, value)
logging.info("Disk detached %s", vmdk_path)
return None
# Edit settings for a volume identified by its full path
def set_vol_opts(name, tenant_name, options):
# Create a dict of the options, the options are provided as
# "access=read-only" and we get a dict like {'access': 'read-only'}
opts_list = "".join(options.replace("=", ":").split())
opts = dict(i.split(":") for i in opts_list.split(","))
# create volume path
try:
vol_name, datastore = parse_vol_name(name)
except ValidationError as ex:
logging.exception(ex)
return False
logging.debug("set_vol_opts: name=%s options=%s vol_name=%s, datastore=%s",
name, options, vol_name, datastore)
if not datastore:
msg = "Invalid datastore '{0}'.\n".format(datastore)
logging.warning(msg)
return False
datastore_url = vmdk_utils.get_datastore_url(datastore)
    # Trying to set opts on a volume which was created by a non-existent
    # tenant: fail the request.
if tenant_name:
# if tenant_name is "None", which means the function is called without multi-tenancy
error_info = auth_api.check_tenant_exist(tenant_name)
if not error_info:
logging.warning(error_code_to_message[ErrorCode.TENANT_NOT_EXIST].format(tenant_name))
return False
# get /vmfs/volumes/<datastore_url>/dockvols path on ESX:
path, errMsg = get_vol_path(datastore, tenant_name)
if path is None:
msg = "Failed to get datastore path {0}".format(path)
logging.warning(msg)
return False
vmdk_path = vmdk_utils.get_vmdk_path(path, vol_name)
logging.debug("set_vol_opts: path=%s vmdk_path=%s", path, vmdk_path)
if not os.path.isfile(vmdk_path):
msg = 'Volume {0} not found.'.format(vol_name)
logging.warning(msg)
return False
# For now only allow resetting the access and attach-as options.
valid_opts = {
kv.ACCESS : kv.ACCESS_TYPES,
kv.ATTACH_AS : kv.ATTACH_AS_TYPES
}
invalid = frozenset(opts.keys()).difference(valid_opts.keys())
if len(invalid) != 0:
msg = 'Invalid options: {0} \n'.format(list(invalid)) \
+ 'Options that can be edited: ' \
+ '{0}'.format(list(valid_opts))
raise ValidationError(msg)
has_invalid_opt_value = False
for key in opts.keys():
if key in valid_opts:
if not opts[key] in valid_opts[key]:
msg = 'Invalid option value {0}.\n'.format(opts[key]) +\
'Supported values are {0}.\n'.format(valid_opts[key])
logging.warning(msg)
has_invalid_opt_value = True
if has_invalid_opt_value:
return False
vol_meta = kv.getAll(vmdk_path)
if vol_meta:
if not vol_meta[kv.VOL_OPTS]:
vol_meta[kv.VOL_OPTS] = {}
for key in opts.keys():
vol_meta[kv.VOL_OPTS][key] = opts[key]
return kv.setAll(vmdk_path, vol_meta)
return False
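# Illustrative parsing of the `options` argument performed at the top of
# set_vol_opts() (the option keys shown are examples; only the options listed
# in valid_opts are actually accepted):
#   "access=read-only"                 -> {'access': 'read-only'}
#   "access=read-only, attach-as=xyz"  -> {'access': 'read-only', 'attach-as': 'xyz'}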
def wait_ops_in_flight():
# Wait for the event indicating all in-flight ops are drained
eventReceived = opsCounter.wait(WAIT_OPS_TIMEOUT)
if (eventReceived):
logging.info("All in-flight operations are completed - exiting")
os.kill(os.getpid(), signal.SIGKILL) # kill the main process
else:
logging.warn("In-flight operations are taking too long to complete - abandoning wait")
def signal_handler_stop(signalnum, frame):
global stopBarrier
logging.warn("Received signal num: %d - exiting", signalnum)
if (opsCounter.value == 0):
logging.info("No in-flight operations - exiting")
sys.exit(0)
# Set the stop barrier to true
logging.debug("Setting stop barrier to true")
stopBarrier = True
# Fire a thread to wait for ops in flight to drain
threadutils.start_new_thread(target=wait_ops_in_flight)
def load_vmci():
global lib
logging.info("Loading VMCI server lib.")
if sys.hexversion >= PYTHON64_VERSION:
lib = CDLL(os.path.join(LIB_LOC64, "libvmci_srv.so"), use_errno=True)
else:
lib = CDLL(os.path.join(LIB_LOC, "libvmci_srv.so"), use_errno=True)
def send_vmci_reply(client_socket, reply_string):
reply = json.dumps(reply_string)
response = lib.vmci_reply(client_socket, c_char_p(reply.encode()))
errno = get_errno()
logging.debug("lib.vmci_reply: VMCI replied with errcode %s", response)
if response == VMCI_ERROR:
logging.warning("vmci_reply returned error %s (errno=%d)",
os.strerror(errno), errno)
def execRequestThread(client_socket, cartel, request):
'''
Execute requests in a thread context with a per volume locking.
'''
# Before we start, block to allow main thread or other running threads to advance.
# https://docs.python.org/2/faq/library.html#none-of-my-threads-seem-to-run-why
time.sleep(0.001)
try:
# Get VM name & ID from VSI (we only get cartelID from vmci, need to convert)
vmm_leader = vsi.get("/userworld/cartel/%s/vmmLeader" % str(cartel))
group_info = vsi.get("/vm/%s/vmmGroupInfo" % vmm_leader)
vm_name = group_info["displayName"]
cfg_path = group_info["cfgPath"]
uuid = group_info["uuid"] # BIOS UUID, see http://www.virtu-al.net/2015/12/04/a-quick-reference-of-vsphere-ids/
vcuuid = group_info["vcUuid"] # VC UUID
# pyVmomi expects uuid like this one: 564dac12-b1a0-f735-0df3-bceb00b30340
# to get it from uuid in VSI vms/<id>/vmmGroup, we use the following format:
UUID_FORMAT = "{0}{1}{2}{3}-{4}{5}-{6}{7}-{8}{9}-{10}{11}{12}{13}{14}{15}"
vm_uuid = UUID_FORMAT.format(*uuid.replace("-", " ").split())
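        # Illustrative conversion (the uuid value is hypothetical): a VSI uuid of
        # "56 4d ac 12 b1 a0 f7 35-0d f3 bc eb 00 b3 03 40" becomes
        # "564dac12-b1a0-f735-0df3-bceb00b30340" after the replace/split/format above.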
vc_uuid = None
# Use a VC uuid if one is present.
if len(vcuuid) > 0:
vc_uuid = UUID_FORMAT.format(*vcuuid.replace("-", " ").split())
try:
req = json.loads(request.decode('utf-8'))
except ValueError as e:
reply_string = {u'Error': "Failed to parse json '%s'." % request}
send_vmci_reply(client_socket, reply_string)
else:
logging.debug("execRequestThread: req=%s", req)
            # If the request from the client does not include a version number, default to
            # SERVER_PROTOCOL_VERSION to remain backward compatible
client_protocol_version = int(req["version"]) if "version" in req else SERVER_PROTOCOL_VERSION
logging.debug("execRequestThread: client protocol version=%d", client_protocol_version)
if client_protocol_version != SERVER_PROTOCOL_VERSION:
reply_string = err("""There is a mismatch between VDVS client (Docker plugin) protocol version
({}) and server (ESXi) protocol version ({}) which indicates different
versions of the product are installed on Guest and ESXi sides,
please make sure VDVS plugin and driver are from the same release version.
""".format(client_protocol_version, SERVER_PROTOCOL_VERSION))
send_vmci_reply(client_socket, reply_string)
logging.warning("executeRequest '%s' failed: %s", req["cmd"], reply_string)
return
# If the command is "version" then there is no need to handle the request via
# the normal VM request handler.
if req["cmd"] == "version":
reply_string = {u'version': "%s" % vmdk_utils.get_version()}
else:
opts = req["details"]["Opts"] if "Opts" in req["details"] else {}
reply_string = executeRequest(
vm_uuid=vm_uuid,
vc_uuid=vc_uuid,
vm_name=vm_name,
config_path=cfg_path,
cmd=req["cmd"],
full_vol_name=req["details"]["Name"],
opts=opts)
logging.info("executeRequest '%s' completed with ret=%s", req["cmd"], reply_string)
send_vmci_reply(client_socket, reply_string)
except Exception as ex_thr:
logging.exception("Unhandled Exception:")
reply_string = err("Server returned an error: {0}".format(repr(ex_thr)))
send_vmci_reply(client_socket, reply_string)
finally:
opsCounter.decr()
# code to grab/release VMCI listening socket
g_vmci_listening_socket = None
def vmci_grab_listening_socket(port):
"""call C code to open/bind/listen on the VMCI socket"""
global g_vmci_listening_socket
if g_vmci_listening_socket:
logging.error("VMCI Listening socket - multiple init") # message for us. Should never happen
return
g_vmci_listening_socket = lib.vmci_init(c_uint(port))
if g_vmci_listening_socket == VMCI_ERROR:
errno = get_errno()
raise OSError("Failed to initialize vSocket listener: %s (errno=%d)" \
% (os.strerror(errno), errno))
def vmci_release_listening_socket():
"""Calls C code to release the VMCI listening socket"""
if g_vmci_listening_socket:
lib.vmci_close(g_vmci_listening_socket)
# load VMCI shared lib , listen on vSocket in main loop, handle requests
def handleVmciRequests(port):
skip_count = MAX_SKIP_COUNT # retries for vmci_get_one_op failures
bsize = MAX_JSON_SIZE
txt = create_string_buffer(bsize)
cartel = c_int32()
vmci_grab_listening_socket(port)
while True:
# Listening on VMCI socket
logging.debug("lib.vmci_get_one_op: waiting for new request...")
c = lib.vmci_get_one_op(g_vmci_listening_socket, byref(cartel), txt, c_int(bsize))
logging.debug("lib.vmci_get_one_op returns %d, buffer '%s'", c, txt.value)
errno = get_errno()
if errno == ECONNABORTED:
logging.warn("Client with non privileged port attempted a request")
continue
if c == VMCI_ERROR:
            # We can self-correct by reopening sockets internally. Give it a chance.
logging.warning("vmci_get_one_op failed ret=%d: %s (errno=%d) Retrying...",
c, os.strerror(errno), errno)
skip_count = skip_count - 1
if skip_count <= 0:
raise Exception(
"vmci_get_one_op: too many errors. Giving up.")
continue
else:
skip_count = MAX_SKIP_COUNT # reset the counter, just in case
client_socket = c # Bind to avoid race conditions.
# Check the stop barrier - if set, fail new incoming requests and exit the loop
if stopBarrier:
svc_stop_err = 'Service is being stopped: operation declined!'
logging.warning(svc_stop_err)
send_vmci_reply(client_socket, err(svc_stop_err))
continue
if not get_si():
svc_connect_err = 'Service is presently unavailable, ensure the ESXi Host Agent is running on this host'
logging.warning(svc_connect_err)
send_vmci_reply(client_socket, err(svc_connect_err))
continue
opsCounter.incr()
# Fire a thread to execute the request
threadutils.start_new_thread(
target=execRequestThread,
args=(client_socket, cartel.value, txt.value))
# Close listening socket when the loop is over
logging.info("Closing VMCI listening socket...")
vmci_release_listening_socket()
def usage():
print("Usage: %s -p <vSocket Port to listen on>" % sys.argv[0])
def main():
log_config.configure()
logging.info("==== Starting vmdkops service ====")
logging.info("Version: %s , Pid: %d", vmdk_utils.get_version(), os.getpid() )
signal.signal(signal.SIGINT, signal_handler_stop)
signal.signal(signal.SIGTERM, signal_handler_stop)
try:
port = 1019
opts, args = getopt.getopt(sys.argv[1:], 'hp:')
except getopt.error as msg:
if msg:
logging.exception(msg)
usage()
return 1
for a, v in opts:
if a == '-p':
port = int(v)
if a == '-h':
usage()
return 0
try:
# Load and use DLL with vsocket shim to listen for docker requests
load_vmci()
kv.init()
connectLocalSi()
# start the daemon. Do all the task to start the listener through the daemon
threadutils.start_new_thread(target=vm_listener.start_vm_changelistener,
daemon=True)
handleVmciRequests(port)
except Exception as e:
logging.exception(e)
def getTaskList(prop_collector, tasks):
# Create filter
obj_specs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
property_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
pathSet=[],
all=True)
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = obj_specs
filter_spec.propSet = [property_spec]
return prop_collector.CreateFilter(filter_spec, True)
#-----------------------------------------------------------
#
# Support for 'wait for task completion'
# Keep it here to keep a single file for now
#
"""
Written by Michael Rice <[email protected]>
Github: https://github.com/michaelrice
Website: https://michaelrice.github.io/
Blog: http://www.errr-online.com/
This code has been released under the terms of the Apache 2 licenses
http://www.apache.org/licenses/LICENSE-2.0.html
Helper module for task operations.
"""
def wait_for_tasks(si, tasks):
"""Given the service instance si and tasks, it returns after all the
tasks are complete
"""
task_list = [str(task) for task in tasks]
property_collector = si.content.propertyCollector
pcfilter = getTaskList(property_collector, tasks)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(task_list):
update = property_collector.WaitForUpdates(version)
for filter_set in update.filterSet:
for obj_set in filter_set.objectSet:
task = obj_set.obj
for change in obj_set.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in task_list:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
task_list.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if pcfilter:
pcfilter.Destroy()
#------------------------
class ValidationError(Exception):
""" An exception for option validation errors """
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
# start the server
if __name__ == "__main__":
# Setting LANG environment variable if it is unset to ensure proper encoding
if os.environ.get('LANG') is None:
os.environ['LANG'] = "en_US.UTF-8"
os.execve(__file__, sys.argv, os.environ)
main()
| apache-2.0 | -5,459,104,080,962,051,000 | 39.649148 | 137 | 0.606508 | false |
offlinehacker/flumotion | flumotion/common/debug.py | 3 | 9604 | # Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""debugging helper code
"""
import linecache
import gc
import re
import sys
import types
from twisted.python.reflect import filenameToModuleName
__version__ = "$Rev$"
_tracing = 0
_indent = ''
def trace_start(func_filter=None, ignore_files_re=None, print_returns=False,
write=None):
global _tracing, _indent
if func_filter:
func_filter = re.compile(func_filter)
if ignore_files_re:
ignore_files_re = re.compile(ignore_files_re)
if not write:
def write(indent, str, *args):
print (indent + str) % args
def do_trace(frame, event, arg):
global _tracing, _indent
if not _tracing:
print '[tracing stopped]'
return None
co = frame.f_code
if event == 'line':
return do_trace
if func_filter and not func_filter.search(co.co_name):
return None
if ignore_files_re and ignore_files_re.search(co.co_filename):
return None
elif event == 'call' or event == 'c_call':
if co.co_name == '?':
return None
module = filenameToModuleName(co.co_filename)
write(_indent, '%s:%d:%s():', module, frame.f_lineno, co.co_name)
_indent += ' '
return do_trace
elif event == 'return' or event == 'c_return':
if print_returns:
write(_indent, 'return %r', arg)
_indent = _indent[:-2]
return None
elif event == 'exception' or event == 'c_exception':
if arg:
write(_indent, 'Exception: %s:%d: %s (%s)', co.co_filename,
frame.f_lineno, arg[0].__name__, arg[1])
else:
write(_indent, 'Exception: (from C)')
return do_trace
else:
write(_indent, 'unknown event: %s', event)
return None
_tracing += 1
if _tracing == 1:
assert _indent == ''
sys.settrace(do_trace)
def trace_stop():
global _tracing, _indent
assert _tracing > 0
_tracing -= 1
if not _tracing:
sys.settrace(None)
_indent = ''
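# Illustrative usage of the tracing helpers above (the filter patterns are
# hypothetical):
#   from flumotion.common import debug
#   debug.trace_start(func_filter='remoteCall', ignore_files_re='twisted/',
#                     print_returns=True)
#   ...  # run the code to be traced
#   debug.trace_stop()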
def print_stack(handle=None):
f = sys._getframe(1)
output = []
while f:
co = f.f_code
filename = co.co_filename
lineno = f.f_lineno
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno)
# reversed so we can reverse() later
if f.f_locals:
for k, v in f.f_locals.items():
output.append(' %s = %r\n' % (k, v))
output.append(' Locals:\n')
if line:
output.append(' %s\n' % line.strip())
output.append(' File "%s", line %d, in %s\n' % (
filename, lineno, name))
f = f.f_back
output.reverse()
if handle is None:
handle = sys.stdout
for line in output:
handle.write(line)
class UncollectableMonitor(object):
def __init__(self, period=120):
known = {}
# set this if you want python to print out when uncollectable
# objects are detected; will print out all objects in the cycle,
# not just the one(s) that caused the cycle to be uncollectable
#
# gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_INSTANCES |
# gc.DEBUG_OBJECTS)
from twisted.internet import reactor
def sample():
gc.collect()
for o in gc.garbage:
if o not in known:
known[o] = True
self.uncollectable(o)
reactor.callLater(period, sample)
reactor.callLater(period, sample)
def uncollectable(self, obj):
print '\nUncollectable object cycle in gc.garbage:'
print "Parents:"
self._printParents(obj, 2)
print "Kids:"
self._printKids(obj, 2)
def _printParents(self, obj, level, indent=' '):
print indent, self._shortRepr(obj)
if level > 0:
for p in gc.get_referrers(obj):
self._printParents(p, level - 1, indent + ' ')
def _printKids(self, obj, level, indent=' '):
print indent, self._shortRepr(obj)
if level > 0:
for kid in gc.get_referents(obj):
self._printKids(kid, level - 1, indent + ' ')
def _shortRepr(self, obj):
if not isinstance(obj, dict):
return '%s %r @ 0x%x' % (type(obj).__name__, obj, id(obj))
else:
keys = obj.keys()
keys.sort()
return 'dict with keys %r @ 0x%x' % (keys, id(obj))
class AllocMonitor(object):
def __init__(self, period=10, analyze=None, allocPrint=None):
self.period = period
self.objset = None
from sizer import scanner, annotate
from twisted.internet import reactor
if analyze is not None:
self.analyze = analyze
if allocPrint is not None:
self.allocPrint = allocPrint
def sample():
objset = scanner.Objects()
annotate.markparents(objset)
if self.objset:
self.analyze(self.objset, objset)
self.objset = objset
reactor.callLater(self.period, sample)
reactor.callLater(self.period, sample)
def analyze(self, old, new):
from sizer import operations
size = 0
for k in operations.diff(new, old):
size -= old[k].size
allocators = {}
diff = operations.diff(old, new)
for k in diff:
w = new[k]
size += w.size
if not w.parents:
print "Unreferenced object %r, what?" % (w, )
for p in w.parents:
if id(p.obj) == id(self.__dict__):
continue
if id(p.obj) not in diff:
# print "Object %r alloced by %r" % (w, p)
if p not in allocators:
allocators[p] = []
allocators[p].append(w)
print "Total alloc size:", size
for p in allocators:
if p.obj == old or p.obj == new:
print 'foo'
else:
self.allocPrint(p, allocators[p])
for o in gc.garbage:
print '\nUncollectable object cycle in gc.garbage:'
self._printCycle(new[id(o)])
def _printCycle(self, root):
print "Parents:"
self._printParents(root, 2)
print "Kids:"
self._printKids(root, 2)
def _printParents(self, wrap, level, indent=' '):
print indent, self._wrapperRepr(wrap)
if level > 0:
for p in wrap.parents:
self._printParents(p, level - 1, indent + ' ')
def _printKids(self, wrap, level, indent=' '):
print indent, self._wrapperRepr(wrap)
if level > 0:
for kid in wrap.children:
self._printKids(kid, level - 1, indent + ' ')
def _allocStack(self, wrap, stack):
stack.append(wrap)
for p in wrap.parents:
if (isinstance(p.obj, types.ModuleType)
or isinstance(p.obj, type)
or isinstance(p.obj, types.InstanceType)):
stack.append(p)
return stack
if len(wrap.parents) == 1:
return self._allocStack(wrap.parents[0], stack)
return stack
def _wrapperRepr(self, wrap):
o = wrap.obj
if wrap.type != dict:
return '%s %r @ 0x%x' % (wrap.type.__name__, o, id(o))
else:
keys = o.keys()
keys.sort()
return 'dict with keys %r @ 0x%x' % (keys, id(o))
def allocPrint(self, allocator, directAllocs):
allocStack = self._allocStack(allocator, [])
print '\nAlloc by ' + self._wrapperRepr(allocStack.pop(0))
while allocStack:
print ' referenced by ' + self._wrapperRepr(allocStack.pop(0))
print "%d new %s:" % (len(directAllocs),
len(directAllocs) == 1 and "object" or "objects")
for wrap in directAllocs:
print ' ' + self._wrapperRepr(wrap)
def getVersions():
"""
Get versions of all flumotion modules based on svn Rev keyword.
"""
r = {}
for modname in sys.modules:
mod = sys.modules[modname]
if modname.startswith('flumotion.') and hasattr(mod, "__version__"):
# Has the form: "$Rev$"
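            # e.g. int("$Rev: 12345 $"[6:-2]) -> 12345 (illustrative revision)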
try:
versionnum = int(mod.__version__[6:-2])
r[modname] = versionnum
except IndexError:
pass
except ValueError:
pass
return r
| gpl-2.0 | -865,422,278,063,557,100 | 29.782051 | 79 | 0.544461 | false |
bhaveshAn/crisscross | crisscross/platforms/macosx/wifi.py | 1 | 4934 | from crisscross.facades import Wifi
from pyobjus.dylib_manager import load_framework, INCLUDE
from pyobjus import autoclass
load_framework(INCLUDE.Foundation)
load_framework(INCLUDE.CoreWLAN)
CWInterface = autoclass('CWInterface')
CWNetwork = autoclass('CWNetwork')
CWWiFiClient = autoclass('CWWiFiClient')
NSArray = autoclass('NSArray')
NSDictionary = autoclass('NSDictionary')
NSString = autoclass('NSString')
class OSXWifi(Wifi):
names = {}
def _is_enabled(self):
'''
Returns `True` if the Wifi is enabled else returns `False`.
'''
return CWWiFiClient.sharedWiFiClient().interface().powerOn()
def _get_network_info(self, name):
'''
Returns all the network information.
'''
        def ns(x):
            # helper to wrap a Python string in an NSString (not used below)
            return NSString.alloc().initWithUTF8String_(x)
accessNetworkType = self.names[name].accessNetworkType
aggregateRSSI = self.names[name].aggregateRSSI
beaconInterval = self.names[name].beaconInterval
bssid = self.names[name].bssid.UTF8String()
countryCode = self.names[name].countryCode
hasInternet = self.names[name].hasInternet
hasInterworkingIE = self.names[name].hasInterworkingIE
hessid = self.names[name].hessid
ibss = self.names[name].ibss
isAdditionalStepRequiredForAccess = \
self.names[name].isAdditionalStepRequiredForAccess
isCarPlayNetwork = self.names[name].isCarPlayNetwork
isEmergencyServicesReachable = \
self.names[name].isEmergencyServicesReachable
isPasspoint = self.names[name].isPasspoint
isPersonalHotspot = self.names[name].isPersonalHotspot
isUnauthenticatedEmergencyServiceAccessible = \
self.names[name].isUnauthenticatedEmergencyServiceAccessible
noiseMeasurement = self.names[name].noiseMeasurement
physicalLayerMode = self.names[name].physicalLayerMode
rssiValue = self.names[name].rssiValue
securityType = self.names[name].securityType
ssid = self.names[name].ssid.UTF8String()
supportsEasyConnect = self.names[name].supportsEasyConnect
supportsWPS = self.names[name].supportsWPS
venueGroup = self.names[name].venueGroup
venueType = self.names[name].venueType
return {'accessNetworkType': accessNetworkType,
'aggregateRSSI': aggregateRSSI,
'beaconInterval': beaconInterval,
'bssid': bssid,
'countryCode': countryCode,
                'hasInternet': hasInternet,
'hasInterworkingIE': hasInterworkingIE,
'hessid': hessid,
'ibss': ibss,
'isAdditionalStepRequiredForAccess':
isAdditionalStepRequiredForAccess,
'isCarPlayNetwork': isCarPlayNetwork,
'isEmergencyServicesReachable': isEmergencyServicesReachable,
'isPasspoint': isPasspoint,
'isPersonalHotspot': isPersonalHotspot,
'isUnauthenticatedEmergencyServiceAccessible':
isUnauthenticatedEmergencyServiceAccessible,
'noiseMeasurement': noiseMeasurement,
'physicalLayerMode': physicalLayerMode,
'rssiValue': rssiValue,
'securityType': securityType,
'ssid': ssid,
'supportsEasyConnect': supportsEasyConnect,
'supportsWPS': supportsWPS,
'venueGroup': venueGroup,
'venueType': venueType}
def _start_scanning(self):
'''
Starts scanning for available Wi-Fi networks.
'''
if self._is_enabled():
self.names = {}
c = CWInterface.interface()
scan = c.scanForNetworksWithName_error_(None, None)
cnt = scan.allObjects().count()
for i in range(cnt):
self.names[
scan.allObjects().objectAtIndex_(i).ssid.UTF8String()] \
= scan.allObjects().objectAtIndex_(i)
else:
raise Exception("Wifi not enabled.")
def _get_available_wifi(self):
'''
Returns the name of available networks.
'''
return self.names.keys()
def _connect(self, network, parameters):
        '''
        Expects 2 parameters:
            - network: name/ssid of the network.
            - parameters: dict containing the 'password' key.
        '''
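        # e.g. (illustrative): self._connect('HomeWiFi', {'password': 'secret'})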
password = parameters['password']
network_object = self.names[network]
CWInterface.interface().associateToNetwork_password_error_(
network_object,
password,
None)
return
def _disconnect(self):
'''
Disconnect from network.
'''
CWInterface.interface().disassociate()
return
def instance():
return OSXWifi()
| mit | 98,203,984,812,987,540 | 36.097744 | 77 | 0.612485 | false |
coreycb/horizon | openstack_dashboard/dashboards/admin/networks/ports/views.py | 6 | 4347 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.networks.ports \
import forms as ports_forms
from openstack_dashboard.dashboards.admin.networks.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.admin.networks.ports \
import tabs as ports_tabs
from openstack_dashboard.dashboards.project.networks.ports \
import views as project_views
class CreateView(forms.ModalFormView):
form_class = ports_forms.CreatePort
form_id = "create_port_form"
modal_header = _("Create Port")
submit_label = _("Create Port")
submit_url = "horizon:admin:networks:addport"
page_title = _("Create Port")
template_name = 'admin/networks/ports/create.html'
url = 'horizon:admin:networks:detail'
def get_success_url(self):
return reverse(self.url,
args=(self.kwargs['network_id'],))
@memoized.memoized_method
def get_object(self):
try:
network_id = self.kwargs["network_id"]
return api.neutron.network_get(self.request, network_id)
except Exception:
redirect = reverse(self.url,
args=(self.kwargs['network_id'],))
msg = _("Unable to retrieve network.")
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
context['network'] = self.get_object()
args = (self.kwargs['network_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
context['cancel_url'] = reverse(self.url, args=args)
return context
def get_initial(self):
network = self.get_object()
return {"network_id": self.kwargs['network_id'],
"network_name": network.name}
class DetailView(project_views.DetailView):
tab_group_class = ports_tabs.PortDetailTabs
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
port = context["port"]
network_url = "horizon:admin:networks:detail"
subnet_url = "horizon:admin:networks:subnets:detail"
port.network_url = reverse(network_url, args=[port.network_id])
for ip in port.fixed_ips:
ip['subnet_url'] = reverse(subnet_url, args=[ip['subnet_id']])
table = ports_tables.PortsTable(self.request,
network_id=port.network_id)
# TODO(robcresswell) Add URL for "Ports" crumb after bug/1416838
breadcrumb = [
(_("Networks"), self.get_redirect_url()),
((port.network_name or port.network_id), port.network_url),
(_("Ports"),), ]
context["custom_breadcrumb"] = breadcrumb
context["url"] = reverse('horizon:admin:networks:index')
context["actions"] = table.render_row_actions(port)
return context
@staticmethod
def get_redirect_url():
return reverse('horizon:admin:networks:index')
class UpdateView(project_views.UpdateView):
form_class = ports_forms.UpdatePort
template_name = 'admin/networks/ports/update.html'
context_object_name = 'port'
submit_url = "horizon:admin:networks:editport"
success_url = 'horizon:admin:networks:detail'
def get_initial(self):
initial = super(UpdateView, self).get_initial()
port = self._get_object()
initial['binding__host_id'] = port['binding__host_id']
return initial
| apache-2.0 | 4,280,966,572,426,464,000 | 37.8125 | 78 | 0.657465 | false |
jaruba/chromium.src | tools/perf/page_sets/startup_pages_record.py | 20 | 1288 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class StartupPagesRecordPage(page_module.Page):
def __init__(self, url, page_set):
super(StartupPagesRecordPage, self).__init__(url=url, page_set=page_set)
self.archive_data_file = 'data/startup_pages.json'
class StartupPagesRecordPageSet(page_set_module.PageSet):
""" Pages to record data for testing starting Chrome with a URL.
We can't use startup_pages.json with record_wpr, since record_wpr
requires a default navigate step, which we don't want for startup
testing; but we do want to record the pages it uses. Also, record_wpr
fails on about:blank, which we want to include in startup testing.
"""
def __init__(self):
super(StartupPagesRecordPageSet, self).__init__(
archive_data_file='data/startup_pages.json')
urls_list = [
# Why: typical page
'http://bbc.co.uk',
# Why: Horribly complex page - stress test!
'http://kapook.com',
]
for url in urls_list:
self.AddUserStory(StartupPagesRecordPage(url, self))
| bsd-3-clause | 1,525,765,499,153,457,400 | 34.777778 | 76 | 0.697205 | false |
thomasyu888/synapsePythonClient | tests/integration/synapseclient/test_command_line_client.py | 1 | 39073 | import json
import logging
import filecmp
import os
import re
import sys
import uuid
import time
import tempfile
import shutil
import pytest
from unittest.mock import patch
from synapseclient import client
from synapseclient import Annotations, Column, Evaluation, File, Folder, Project, Row, RowSet, Schema, Synapse
import synapseclient.__main__ as cmdline
import synapseclient.core.utils as utils
from io import StringIO
@pytest.fixture(scope='module')
def test_state(syn, project, schedule_for_cleanup):
class State:
def __init__(self):
self.syn = syn
self.project = project
self.schedule_for_cleanup = schedule_for_cleanup
self.parser = cmdline.build_parser()
self.upload_filename = _create_temp_file_with_cleanup(schedule_for_cleanup)
self.description_text = "'some description text'"
self.desc_filename = _create_temp_file_with_cleanup(schedule_for_cleanup, self.description_text)
self.update_description_text = \
"'SOMEBODY ONCE TOLD ME THE WORLD WAS GONNA ROLL ME I AINT THE SHARPEST TOOL IN THE SHED'"
return State()
def run(test_state, *command, **kwargs):
"""
Sends the given command list to the command line client.
:returns: The STDOUT output of the command.
"""
old_stdout = sys.stdout
capturedSTDOUT = StringIO()
syn_client = kwargs.get('syn', test_state.syn)
stream_handler = logging.StreamHandler(capturedSTDOUT)
try:
sys.stdout = capturedSTDOUT
syn_client.logger.addHandler(stream_handler)
sys.argv = [item for item in command]
args = test_state.parser.parse_args()
args.debug = True
cmdline.perform_main(args, syn_client)
except SystemExit:
pass # Prevent the test from quitting prematurely
finally:
sys.stdout = old_stdout
syn_client.logger.handlers.remove(stream_handler)
capturedSTDOUT = capturedSTDOUT.getvalue()
return capturedSTDOUT
def parse(regex, output):
"""Returns the first match."""
m = re.search(regex, output)
if m:
if len(m.groups()) > 0:
return m.group(1).strip()
else:
raise Exception('ERROR parsing output: "' + str(output) + '"')
def test_command_line_client(test_state):
print("TESTING CMD LINE CLIENT")
# Create a Project
output = run(test_state,
'synapse',
'--skip-checks',
'create',
'-name',
str(uuid.uuid4()),
'-description',
'test of command line client',
'Project')
project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
test_state.schedule_for_cleanup(project_id)
# Create a File
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
output = run(test_state,
'synapse',
'--skip-checks',
'add',
'-name',
'BogusFileEntity',
'-description',
'Bogus data to test file upload',
'-parentid',
project_id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = test_state.syn.get(file_entity_id)
fh = test_state.syn._get_file_handle_as_creator(f1.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
# Get File from the command line
output = run(test_state,
'synapse',
'--skip-checks',
'get',
file_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
test_state.schedule_for_cleanup(downloaded_filename)
assert os.path.exists(downloaded_filename)
assert filecmp.cmp(filename, downloaded_filename)
# Update the File
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
output = run(test_state,
'synapse',
'--skip-checks',
'store',
'--id',
file_entity_id,
filename)
# Get the File again
output = run(test_state,
'synapse',
'--skip-checks',
'get',
file_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
test_state.schedule_for_cleanup(downloaded_filename)
assert os.path.exists(downloaded_filename)
assert filecmp.cmp(filename, downloaded_filename)
# Store the same file and don't force a new version
    # Get the existing file to determine its current version
current_file = test_state.syn.get(file_entity_id, downloadFile=False)
current_version = current_file.versionNumber
# Store it without forcing version
output = run(test_state,
'synapse',
'--skip-checks',
'store',
'--noForceVersion',
'--id',
file_entity_id,
filename)
# Get the File again and check that the version did not change
new_file = test_state.syn.get(file_entity_id, downloadFile=False)
new_version = new_file.versionNumber
assert current_version == new_version
# Move the file to new folder
folder = test_state.syn.store(Folder(parentId=project_id))
output = run(test_state,
'synapse',
'mv',
'--id',
file_entity_id,
'--parentid',
folder.id)
movedFile = test_state.syn.get(file_entity_id, downloadFile=False)
assert movedFile.parentId == folder.id
# Test Provenance
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
output = run(test_state,
'synapse',
'--skip-checks',
'set-provenance',
'-id',
file_entity_id,
'-name',
'TestActivity',
'-description',
'A very excellent provenance',
'-used',
file_entity_id,
'-executed',
repo_url)
output = run(test_state,
'synapse',
'--skip-checks',
'get-provenance',
'--id',
file_entity_id)
activity = json.loads(output)
assert activity['name'] == 'TestActivity'
assert activity['description'] == 'A very excellent provenance'
used = utils._find_used(activity, lambda used: 'reference' in used)
assert used['reference']['targetId'] == file_entity_id
used = utils._find_used(activity, lambda used: 'url' in used)
assert used['url'] == repo_url
assert used['wasExecuted']
# Note: Tests shouldn't have external dependencies
# but this is a pretty picture of Singapore
singapore_url = 'http://upload.wikimedia.org/wikipedia/commons/' \
'thumb/3/3e/1_singapore_city_skyline_dusk_panorama_2011.jpg' \
'/1280px-1_singapore_city_skyline_dusk_panorama_2011.jpg'
# Test external file handle
output = run(test_state,
'synapse',
'--skip-checks',
'add',
'-name',
'Singapore',
'-description',
'A nice picture of Singapore',
'-parentid',
project_id,
singapore_url)
exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we created an external file handle
f2 = test_state.syn.get(exteral_entity_id)
fh = test_state.syn._get_file_handle_as_creator(f2.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'
output = run(test_state,
'synapse',
'--skip-checks',
'get',
exteral_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
test_state.schedule_for_cleanup(downloaded_filename)
assert os.path.exists(downloaded_filename)
# Delete the Project
run(test_state,
        'synapse',
'--skip-checks', 'delete', project_id)
def test_command_line_client_annotations(test_state):
# Create a Project
output = run(test_state,
'synapse',
'--skip-checks',
'create',
'-name',
str(uuid.uuid4()),
'-description',
'test of command line client',
'Project')
project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
test_state.schedule_for_cleanup(project_id)
# Create a File
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
output = run(test_state,
'synapse',
'--skip-checks',
'add',
'-name',
'BogusFileEntity',
'-description',
'Bogus data to test file upload',
'-parentid',
project_id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test setting annotations
run(test_state,
        'synapse',
'--skip-checks',
'set-annotations',
'--id',
file_entity_id,
'--annotations',
'{"foo": 1, "bar": "1", "baz": [1, 2, 3]}')
# Test getting annotations
# check that the three things set are correct
# This test should be adjusted to check for equality of the
# whole annotation dictionary once the issue of other
# attributes (creationDate, eTag, id, uri) being returned is resolved
# See: https://sagebionetworks.jira.com/browse/SYNPY-175
output = run(test_state,
                 'synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert annotations['foo'] == [1]
assert annotations['bar'] == [u"1"]
assert annotations['baz'] == [1, 2, 3]
# Test setting annotations by replacing existing ones.
output = run(test_state,
                 'synapse',
'--skip-checks',
'set-annotations',
'--id',
file_entity_id,
'--annotations',
'{"foo": 2}',
'--replace')
# Test that the annotation was updated
output = run(test_state,
                 'synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert annotations['foo'] == [2]
# Since this replaces the existing annotations, previous values
# Should not be available.
pytest.raises(KeyError, lambda key: annotations[key], 'bar')
pytest.raises(KeyError, lambda key: annotations[key], 'baz')
# Test running add command to set annotations on a new object
filename2 = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename2)
output = run(test_state,
                 'synapse',
'--skip-checks',
'add',
'-name',
'BogusData2',
'-description',
'Bogus data to test file upload with add and add annotations',
'-parentid',
project_id,
'--annotations',
'{"foo": 123}',
filename2)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test that the annotation was updated
output = run(test_state,
                 'synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert annotations['foo'] == [123]
# Test running store command to set annotations on a new object
filename3 = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename3)
output = run(test_state,
                 'synapse',
'--skip-checks',
'store',
'--name',
'BogusData3',
'--description',
'\"Bogus data to test file upload with store and add annotations\"',
'--parentid',
project_id,
'--annotations',
'{"foo": 456}',
filename3)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test that the annotation was updated
output = run(test_state,
                 'synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id)
annotations = json.loads(output)
assert annotations['foo'] == [456]
def test_command_line_store_and_submit(test_state):
# Create a Project
output = run(test_state,
                 'synapse',
'--skip-checks',
'store',
'--name',
str(uuid.uuid4()),
'--description',
'test of store command',
'--type',
'Project')
project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
test_state.schedule_for_cleanup(project_id)
# Create and upload a file
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
output = run(test_state,
                 'synapse',
'--skip-checks',
'store',
'--description',
'Bogus data to test file upload',
'--parentid',
project_id,
'--file',
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = test_state.syn.get(file_entity_id)
fh = test_state.syn._get_file_handle_as_creator(f1.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
# Test that entity is named after the file it contains
assert f1.name == os.path.basename(filename)
# Create an Evaluation to submit to
eval = Evaluation(name=str(uuid.uuid4()), contentSource=project_id)
eval = test_state.syn.store(eval)
test_state.schedule_for_cleanup(eval)
# Submit a bogus file
output = run(test_state,
                 'synapse',
'--skip-checks',
'submit',
'--evaluation',
eval.id,
'--name',
'Some random name',
'--entity',
file_entity_id)
parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
    # testing different command line options for submitting to an evaluation
# submitting to an evaluation by evaluationID
output = run(test_state,
                 'synapse',
'--skip-checks',
'submit',
'--evalID',
eval.id,
'--name',
'Some random name',
'--alias',
'My Team',
'--entity',
file_entity_id)
parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
# Update the file
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
output = run(test_state,
                 'synapse',
'--skip-checks',
'store',
'--id',
file_entity_id,
'--file',
filename)
updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output)
test_state.schedule_for_cleanup(updated_entity_id)
# Submit an updated bogus file and this time by evaluation name
run(test_state,
        'synapse',
'--skip-checks',
'submit',
'--evaluationName',
eval.name,
'--entity',
file_entity_id)
# Tests shouldn't have external dependencies, but here it's required
ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif'
# Test external file handle
output = run(test_state,
                 'synapse',
'--skip-checks',
'store',
'--name',
'Rubber Ducky',
'--description',
'I like rubber duckies',
'--parentid',
project_id,
'--file',
ducky_url)
exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
test_state.schedule_for_cleanup(exteral_entity_id)
# Verify that we created an external file handle
f2 = test_state.syn.get(exteral_entity_id)
fh = test_state.syn._get_file_handle_as_creator(f2.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'
# submit an external file to an evaluation and use provenance
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
run(test_state,
        'synapse',
'--skip-checks', 'submit',
'--evalID', eval.id,
'--file', filename,
'--parent', project_id,
'--used', exteral_entity_id,
'--executed', repo_url)
# Delete project
run(test_state,
        'synapse',
'--skip-checks', 'delete', project_id)
def test_command_get_recursive_and_query(test_state):
"""Tests the 'synapse get -r' and 'synapse get -q' functions"""
project_entity = test_state.project
# Create Folders in Project
folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()),
parent=project_entity))
folder_entity2 = test_state.syn.store(Folder(name=str(uuid.uuid4()),
parent=folder_entity))
# Create and upload two files in sub-Folder
uploaded_paths = []
file_entities = []
for i in range(2):
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
test_state.schedule_for_cleanup(f)
file_entity = File(f, parent=folder_entity2)
file_entity = test_state.syn.store(file_entity)
file_entities.append(file_entity)
test_state.schedule_for_cleanup(f)
# Add a file in the Folder as well
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
test_state.schedule_for_cleanup(f)
file_entity = File(f, parent=folder_entity)
file_entity = test_state.syn.store(file_entity)
file_entities.append(file_entity)
# get -r uses syncFromSynapse() which uses getChildren(), which is not immediately consistent,
# but faster than chunked queries.
time.sleep(2)
# Test recursive get
run(test_state,
        'synapse',
'--skip-checks', 'get', '-r', folder_entity.id)
# Verify that we downloaded files:
new_paths = [os.path.join('.', folder_entity2.name, os.path.basename(f)) for f in uploaded_paths[:-1]]
new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
test_state.schedule_for_cleanup(folder_entity.name)
for downloaded, uploaded in zip(new_paths, uploaded_paths):
assert os.path.exists(downloaded)
assert filecmp.cmp(downloaded, uploaded)
test_state.schedule_for_cleanup(downloaded)
# Test query get using a Table with an entity column
# This should be replaced when Table File Views are implemented in the client
cols = [Column(name='id', columnType='ENTITYID')]
schema1 = test_state.syn.store(Schema(name='Foo Table', columns=cols, parent=project_entity))
test_state.schedule_for_cleanup(schema1.id)
data1 = [[x.id] for x in file_entities]
test_state.syn.store(RowSet(schema=schema1, rows=[Row(r) for r in data1]))
time.sleep(3) # get -q are eventually consistent
# Test Table/View query get
run(test_state,
        'synapse',
'--skip-checks', 'get', '-q',
"select id from %s" % schema1.id)
# Verify that we downloaded files:
new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]]
new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
test_state.schedule_for_cleanup(folder_entity.name)
for downloaded, uploaded in zip(new_paths, uploaded_paths):
assert os.path.exists(downloaded)
assert filecmp.cmp(downloaded, uploaded)
test_state.schedule_for_cleanup(downloaded)
test_state.schedule_for_cleanup(new_paths[0])
def test_command_copy(test_state):
"""Tests the 'synapse cp' function"""
# Create a Project
project_entity = test_state.syn.store(Project(name=str(uuid.uuid4())))
test_state.schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()),
parent=project_entity))
test_state.schedule_for_cleanup(folder_entity.id)
# Create and upload a file in Folder
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
annots = {'test': ['hello_world']}
# Create, upload, and set annotations on a file in Folder
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
file_entity = test_state.syn.store(File(filename, parent=folder_entity))
externalURL_entity = test_state.syn.store(File(repo_url, name='rand', parent=folder_entity, synapseStore=False))
test_state.syn.set_annotations(Annotations(file_entity, file_entity.etag, annots))
test_state.syn.set_annotations(Annotations(externalURL_entity, externalURL_entity.etag, annots))
test_state.schedule_for_cleanup(file_entity.id)
test_state.schedule_for_cleanup(externalURL_entity.id)
# Test cp function
output = run(test_state,
                 'synapse',
'--skip-checks', 'cp', file_entity.id, '--destinationId', project_entity.id)
output_URL = run(test_state,
                     'synapse',
'--skip-checks', 'cp', externalURL_entity.id, '--destinationId', project_entity.id)
copied_id = parse(r'Copied syn\d+ to (syn\d+)', output)
copied_URL_id = parse(r'Copied syn\d+ to (syn\d+)', output_URL)
# Verify that our copied files are identical
copied_ent = test_state.syn.get(copied_id)
copied_URL_ent = test_state.syn.get(copied_URL_id, downloadFile=False)
test_state.schedule_for_cleanup(copied_id)
test_state.schedule_for_cleanup(copied_URL_id)
copied_ent_annot = test_state.syn.get_annotations(copied_id)
copied_url_annot = test_state.syn.get_annotations(copied_URL_id)
copied_prov = test_state.syn.getProvenance(copied_id)['used'][0]['reference']['targetId']
copied_url_prov = test_state.syn.getProvenance(copied_URL_id)['used'][0]['reference']['targetId']
# Make sure copied files are the same
assert copied_prov == file_entity.id
assert copied_ent_annot == annots
assert copied_ent.properties.dataFileHandleId == file_entity.properties.dataFileHandleId
# Make sure copied URLs are the same
assert copied_url_prov == externalURL_entity.id
assert copied_url_annot == annots
assert copied_URL_ent.externalURL == repo_url
assert copied_URL_ent.name == 'rand'
assert copied_URL_ent.properties.dataFileHandleId == externalURL_entity.properties.dataFileHandleId
# Verify that errors are being thrown when a
# file is copied to a folder/project that has a file with the same filename
pytest.raises(ValueError, run, test_state, 'synapse', '--debug', '--skip-checks', 'cp', file_entity.id,
'--destinationId', project_entity.id)
def test_command_line_using_paths(test_state):
# Create a Project
project_entity = test_state.syn.store(Project(name=str(uuid.uuid4())))
test_state.schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
# Create and upload a file in Folder
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
file_entity = test_state.syn.store(File(filename, parent=folder_entity))
# Verify that we can use show with a filename
output = run(test_state,
                 'synapse',
'--skip-checks', 'show', filename)
id = parse(r'File: %s\s+\((syn\d+)\)\s+' % os.path.split(filename)[1], output)
assert file_entity.id == id
# Verify that limitSearch works by making sure we get the file entity
# that's inside the folder
file_entity2 = test_state.syn.store(File(filename, parent=project_entity))
output = run(test_state,
                 'synapse',
'--skip-checks', 'get',
'--limitSearch', folder_entity.id,
filename)
id = parse(r'Associated file: .* with synapse ID (syn\d+)', output)
name = parse(r'Associated file: (.*) with synapse ID syn\d+', output)
assert file_entity.id == id
assert utils.equal_paths(name, filename)
# Verify that set-provenance works with filepath
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
output = run(test_state,
                 'synapse',
'--skip-checks', 'set-provenance',
'-id', file_entity2.id,
'-name', 'TestActivity',
'-description', 'A very excellent provenance',
'-used', filename,
'-executed', repo_url,
'-limitSearch', folder_entity.id)
parse(r'Set provenance record (\d+) on entity syn\d+', output)
output = run(test_state,
                 'synapse',
'--skip-checks', 'get-provenance',
'-id', file_entity2.id)
activity = json.loads(output)
assert activity['name'] == 'TestActivity'
assert activity['description'] == 'A very excellent provenance'
# Verify that store works with provenance specified with filepath
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
filename2 = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename2)
output = run(test_state,
                 'synapse',
'--skip-checks', 'add', filename2,
'-parentid', project_entity.id,
'-used', filename,
'-executed', '%s %s' % (repo_url, filename))
entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
output = run(test_state,
                 'synapse',
'--skip-checks', 'get-provenance',
'-id', entity_id)
activity = json.loads(output)
a = [a for a in activity['used'] if not a['wasExecuted']]
assert a[0]['reference']['targetId'] in [file_entity.id, file_entity2.id]
# Test associate command
    # At this point two files (filename and filename2) are stored in Synapse
path = tempfile.mkdtemp()
test_state.schedule_for_cleanup(path)
shutil.copy(filename, path)
shutil.copy(filename2, path)
run(test_state,
        'synapse',
'--skip-checks', 'associate', path, '-r')
run(test_state,
        'synapse',
'--skip-checks', 'show', filename)
def test_table_query(test_state):
"""Test command line ability to do table query."""
cols = [Column(name='name', columnType='STRING', maximumSize=1000),
Column(name='foo', columnType='STRING', enumValues=['foo', 'bar', 'bat']),
Column(name='x', columnType='DOUBLE'),
Column(name='age', columnType='INTEGER'),
Column(name='cartoon', columnType='BOOLEAN')]
project_entity = test_state.project
schema1 = test_state.syn.store(Schema(name=str(uuid.uuid4()), columns=cols, parent=project_entity))
test_state.schedule_for_cleanup(schema1.id)
data1 = [['Chris', 'bar', 11.23, 45, False],
['Jen', 'bat', 14.56, 40, False],
['Jane', 'bat', 17.89, 6, False],
['Henry', 'bar', 10.12, 1, False]]
test_state.syn.store(RowSet(schema=schema1, rows=[Row(r) for r in data1]))
# Test query
output = run(test_state,
        'synapse',
'--skip-checks', 'query',
'select * from %s' % schema1.id)
output_rows = output.rstrip("\n").split("\n")
# Check the length of the output
assert len(output_rows) == 5, "got %s rows" % (len(output_rows),)
# Check that headers are correct.
# Should be column names in schema plus the ROW_ID and ROW_VERSION
my_headers_set = output_rows[0].split("\t")
expected_headers_set = ["ROW_ID", "ROW_VERSION"] + list(map(lambda x: x.name, cols))
assert my_headers_set == expected_headers_set, "%r != %r" % (my_headers_set, expected_headers_set)
def test_login(test_state):
alt_syn = Synapse()
username = "username"
password = "password"
with patch.object(alt_syn, "login") as mock_login, \
patch.object(alt_syn, "getUserProfile", return_value={"userName": "test_user", "ownerId": "ownerId"})\
as mock_get_user_profile:
run(test_state,
            'synapse',
'--skip-checks', 'login',
'-u', username,
'-p', password,
'--rememberMe',
syn=alt_syn)
mock_login.assert_called_once_with(username, password=password, forced=True, rememberMe=True, silent=False)
mock_get_user_profile.assert_called_once_with()
def test_configPath(test_state):
"""Test using a user-specified configPath for Synapse configuration file."""
tmp_config_file = tempfile.NamedTemporaryFile(suffix='.synapseConfig', delete=False)
shutil.copyfile(client.CONFIG_FILE, tmp_config_file.name)
# Create a File
filename = utils.make_bogus_data_file()
test_state.schedule_for_cleanup(filename)
output = run(test_state,
                 'synapse',
'--skip-checks',
'--configPath',
tmp_config_file.name,
'add',
'-name',
'BogusFileEntityTwo',
'-description',
'Bogus data to test file upload',
'-parentid',
test_state.project.id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = test_state.syn.get(file_entity_id)
fh = test_state.syn._get_file_handle_as_creator(f1.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
def _description_wiki_check(syn, run_output, expected_description):
entity_id = parse(r'Created.* entity:\s+(syn\d+)\s+', run_output)
wiki = syn.getWiki(entity_id)
assert expected_description == wiki.markdown
def _create_temp_file_with_cleanup(schedule_for_cleanup, specific_file_text=None):
if specific_file_text:
with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as file:
file.write(specific_file_text)
filename = file.name
else:
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
return filename
def test_create__with_description(test_state):
output = run(test_state,
'synapse',
'create',
'Folder',
'-name',
str(uuid.uuid4()),
'-parentid',
test_state.project.id,
'--description',
test_state.description_text
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
def test_store__with_description(test_state):
output = run(test_state,
'synapse',
'store',
test_state.upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
test_state.project.id,
'--description',
test_state.description_text
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
def test_add__with_description(test_state):
output = run(test_state,
'synapse',
'add',
test_state.upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
test_state.project.id,
'--description',
test_state.description_text
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
def test_create__with_descriptionFile(test_state):
output = run(test_state,
'synapse',
'create',
'Folder',
'-name',
str(uuid.uuid4()),
'-parentid',
test_state.project.id,
'--descriptionFile',
test_state.desc_filename
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
def test_store__with_descriptionFile(test_state):
output = run(test_state,
'synapse',
'store',
test_state.upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
test_state.project.id,
'--descriptionFile',
test_state.desc_filename
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
def test_add__with_descriptionFile(test_state):
output = run(test_state,
'synapse',
'add',
test_state.upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
test_state.project.id,
'--descriptionFile',
test_state.desc_filename
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
def test_create__update_description(test_state):
name = str(uuid.uuid4())
output = run(test_state,
'synapse',
'create',
'Folder',
'-name',
name,
'-parentid',
test_state.project.id,
'--descriptionFile',
test_state.desc_filename
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
output = run(test_state,
'synapse',
'create',
'Folder',
'-name',
name,
'-parentid',
test_state.project.id,
'--description',
test_state.update_description_text
)
_description_wiki_check(test_state.syn, output, test_state.update_description_text)
def test_store__update_description(test_state):
name = str(uuid.uuid4())
output = run(test_state,
'synapse',
'store',
test_state.upload_filename,
'-name',
name,
'-parentid',
test_state.project.id,
'--descriptionFile',
test_state.desc_filename
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
output = run(test_state,
'synapse',
'store',
test_state.upload_filename,
'-name',
name,
'-parentid',
test_state.project.id,
'--description',
test_state.update_description_text
)
_description_wiki_check(test_state.syn, output, test_state.update_description_text)
def test_add__update_description(test_state):
name = str(uuid.uuid4())
output = run(test_state,
'synapse',
'add',
test_state.upload_filename,
'-name',
name,
'-parentid',
test_state.project.id,
'--descriptionFile',
test_state.desc_filename
)
_description_wiki_check(test_state.syn, output, test_state.description_text)
output = run(test_state,
'synapse',
'add',
test_state.upload_filename,
'-name',
name,
'-parentid',
test_state.project.id,
'--description',
test_state.update_description_text
)
_description_wiki_check(test_state.syn, output, test_state.update_description_text)
def test_create__same_project_name(test_state):
"""Test creating project that already exists returns the existing project.
"""
name = str(uuid.uuid4())
output_first = run(test_state,
'synapse',
'create',
'--name',
name,
'Project')
entity_id_first = parse(r'Created entity:\s+(syn\d+)\s+',
output_first)
test_state.schedule_for_cleanup(entity_id_first)
output_second = run(test_state,
'synapse',
'create',
'--name',
name,
'Project')
entity_id_second = parse(r'Created entity:\s+(syn\d+)\s+',
output_second)
assert entity_id_first == entity_id_second
@patch.object(utils.sys.stdin, 'isatty')
def test_storeTable__csv(mock_sys, test_state):
    # When running on Windows with multiple CPUs, sys.stdin.isatty() can return True,
    # so we mock utils.sys.stdin here.
mock_sys.return_value = False
output = run(test_state,
'synapse',
'store-table',
'--csv',
test_state.desc_filename,
'--name',
str(uuid.uuid4()),
'--parentid',
test_state.project.id
)
mapping = json.loads(output)
test_state.schedule_for_cleanup(mapping['tableId'])
| apache-2.0 | -5,918,253,265,935,864,000 | 34.618049 | 116 | 0.555524 | false |
SrNetoChan/QGIS | tests/src/python/featuresourcetestbase.py | 14 | 50722 | # -*- coding: utf-8 -*-
"""QGIS Unit test utils for QgsFeatureSource subclasses.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import str
from builtins import object
__author__ = 'Nyall Dawson'
__date__ = '2017-05-25'
__copyright__ = 'Copyright 2017, The QGIS Project'
from qgis.core import (
QgsRectangle,
QgsFeatureRequest,
QgsFeature,
QgsWkbTypes,
QgsProject,
QgsGeometry,
QgsAbstractFeatureIterator,
QgsExpressionContextScope,
QgsExpressionContext,
QgsVectorLayerFeatureSource,
QgsCoordinateReferenceSystem,
NULL
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime
from utilities import compareWkt
class FeatureSourceTestCase(object):
'''
    This is a collection of tests for QgsFeatureSource subclasses and is kept generic.
To make use of it, subclass it and set self.source to a QgsFeatureSource you want to test.
Make sure that your source uses the default dataset by converting one of the provided datasets from the folder
tests/testdata/source to a dataset your source is able to handle.
'''
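    # A minimal sketch of a concrete subclass (hypothetical names; assumes a
    # memory-provider layer populated with the five standard test features):
    #
    #   class TestQgsMemoryFeatureSource(unittest.TestCase, FeatureSourceTestCase):
    #       @classmethod
    #       def setUpClass(cls):
    #           cls.vl = QgsVectorLayer(
    #               'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer'
    #               '&field=name:string&field=name2:string&field=num_char:string'
    #               '&field=dt:datetime&field=date:date&field=time:time',
    #               'test', 'memory')
    #           # ... add the features described in testGetFeatures() ...
    #           cls.source = QgsVectorLayerFeatureSource(cls.vl)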
def treat_date_as_datetime(self):
return False
def treat_datetime_as_string(self):
return False
def treat_date_as_string(self):
return False
def treat_time_as_string(self):
return False
def testCrs(self):
self.assertEqual(self.source.sourceCrs().authid(), 'EPSG:4326')
def testWkbType(self):
self.assertEqual(self.source.wkbType(), QgsWkbTypes.Point)
def testFeatureCount(self):
self.assertEqual(self.source.featureCount(), 5)
self.assertEqual(len(self.source), 5)
def testFields(self):
fields = self.source.fields()
for f in ('pk', 'cnt', 'name', 'name2', 'num_char'):
self.assertTrue(fields.lookupField(f) >= 0)
def testGetFeatures(self, source=None, extra_features=[], skip_features=[], changed_attributes={},
changed_geometries={}):
""" Test that expected results are returned when fetching all features """
# IMPORTANT - we do not use `for f in source.getFeatures()` as we are also
# testing that existing attributes & geometry in f are overwritten correctly
# (for f in ... uses a new QgsFeature for every iteration)
if not source:
source = self.source
it = source.getFeatures()
f = QgsFeature()
attributes = {}
geometries = {}
while it.nextFeature(f):
# expect feature to be valid
self.assertTrue(f.isValid())
# some source test datasets will include additional attributes which we ignore,
# so cherry pick desired attributes
attrs = [f['pk'], f['cnt'], f['name'], f['name2'], f['num_char'], f['dt'], f['date'], f['time']]
# force the num_char attribute to be text - some sources (e.g., delimited text) will
# automatically detect that this attribute contains numbers and set it as a numeric
# field
attrs[4] = str(attrs[4])
attributes[f['pk']] = attrs
geometries[f['pk']] = f.hasGeometry() and f.geometry().asWkt()
expected_attributes = {5: [5, -200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)) if not self.treat_datetime_as_string() else '2020-05-04 12:13:14', QDate(2020, 5, 2) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2020, 5, 2, 0, 0, 0) if not self.treat_date_as_string() else '2020-05-02', QTime(12, 13, 1) if not self.treat_time_as_string() else '12:13:01'],
3: [3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL],
1: [1, 100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)) if not self.treat_datetime_as_string() else '2020-05-03 12:13:14', QDate(2020, 5, 3) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2020, 5, 3, 0, 0, 0) if not self.treat_date_as_string() else '2020-05-03', QTime(12, 13, 14) if not self.treat_time_as_string() else '12:13:14'],
2: [2, 200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)) if not self.treat_datetime_as_string() else '2020-05-04 12:14:14', QDate(2020, 5, 4) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2020, 5, 4, 0, 0, 0) if not self.treat_date_as_string() else '2020-05-04', QTime(12, 14, 14) if not self.treat_time_as_string() else '12:14:14'],
4: [4, 400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)) if not self.treat_datetime_as_string() else '2021-05-04 13:13:14', QDate(2021, 5, 4) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2021, 5, 4, 0, 0, 0) if not self.treat_date_as_string() else '2021-05-04', QTime(13, 13, 14) if not self.treat_time_as_string() else '13:13:14']}
expected_geometries = {1: 'Point (-70.332 66.33)',
2: 'Point (-68.2 70.8)',
3: None,
4: 'Point(-65.32 78.3)',
5: 'Point(-71.123 78.23)'}
for f in extra_features:
expected_attributes[f[0]] = f.attributes()
if f.hasGeometry():
expected_geometries[f[0]] = f.geometry().asWkt()
else:
expected_geometries[f[0]] = None
for i in skip_features:
del expected_attributes[i]
del expected_geometries[i]
for i, a in changed_attributes.items():
for attr_idx, v in a.items():
expected_attributes[i][attr_idx] = v
for i, g, in changed_geometries.items():
if g:
expected_geometries[i] = g.asWkt()
else:
expected_geometries[i] = None
self.assertEqual(attributes, expected_attributes, 'Expected {}, got {}'.format(expected_attributes, attributes))
self.assertEqual(len(expected_geometries), len(geometries))
for pk, geom in list(expected_geometries.items()):
if geom:
assert compareWkt(geom, geometries[pk]), "Geometry {} mismatch Expected:\n{}\nGot:\n{}\n".format(pk,
geom,
geometries[
pk])
else:
self.assertFalse(geometries[pk], 'Expected null geometry for {}'.format(pk))
def assert_query(self, source, expression, expected):
request = QgsFeatureRequest().setFilterExpression(expression).setFlags(QgsFeatureRequest.NoGeometry)
result = set([f['pk'] for f in source.getFeatures(request)])
assert set(expected) == result, 'Expected {} and got {} when testing expression "{}"'.format(set(expected),
result, expression)
self.assertTrue(all(f.isValid() for f in source.getFeatures(request)))
# Also check that filter works when referenced fields are not being retrieved by request
result = set([f['pk'] for f in source.getFeatures(
QgsFeatureRequest().setFilterExpression(expression).setSubsetOfAttributes(['pk'], self.source.fields()))])
assert set(
expected) == result, 'Expected {} and got {} when testing expression "{}" using empty attribute subset'.format(
set(expected), result, expression)
# test that results match QgsFeatureRequest.acceptFeature
request = QgsFeatureRequest().setFilterExpression(expression)
for f in source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def runGetFeatureTests(self, source):
self.assertEqual(len([f for f in source.getFeatures()]), 5)
self.assert_query(source, 'name ILIKE \'QGIS\'', [])
self.assert_query(source, '"name" IS NULL', [5])
self.assert_query(source, '"name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(source, '"name" NOT LIKE \'Ap%\'', [1, 3, 4])
self.assert_query(source, '"name" NOT ILIKE \'QGIS\'', [1, 2, 3, 4])
self.assert_query(source, '"name" NOT ILIKE \'pEAR\'', [1, 2, 4])
self.assert_query(source, 'name = \'Apple\'', [2])
self.assert_query(source, 'name <> \'Apple\'', [1, 3, 4])
self.assert_query(source, 'name = \'apple\'', [])
self.assert_query(source, '"name" <> \'apple\'', [1, 2, 3, 4])
self.assert_query(source, '(name = \'Apple\') is not null', [1, 2, 3, 4])
self.assert_query(source, 'name LIKE \'Apple\'', [2])
self.assert_query(source, 'name LIKE \'aPple\'', [])
self.assert_query(source, 'name LIKE \'Ap_le\'', [2])
self.assert_query(source, 'name LIKE \'Ap\\_le\'', [])
self.assert_query(source, 'name ILIKE \'aPple\'', [2])
self.assert_query(source, 'name ILIKE \'%pp%\'', [2])
self.assert_query(source, 'cnt > 0', [1, 2, 3, 4])
self.assert_query(source, '-cnt > 0', [5])
self.assert_query(source, 'cnt < 0', [5])
self.assert_query(source, '-cnt < 0', [1, 2, 3, 4])
self.assert_query(source, 'cnt >= 100', [1, 2, 3, 4])
self.assert_query(source, 'cnt <= 100', [1, 5])
self.assert_query(source, 'pk IN (1, 2, 4, 8)', [1, 2, 4])
self.assert_query(source, 'cnt = 50 * 2', [1])
self.assert_query(source, 'cnt = 150 / 1.5', [1])
self.assert_query(source, 'cnt = 1000 / 10', [1])
self.assert_query(source, 'cnt = 1000/11+10', []) # checks that source isn't rounding int/int
self.assert_query(source, 'pk = 9 // 4', [2]) # int division
self.assert_query(source, 'cnt = 99 + 1', [1])
self.assert_query(source, 'cnt = 101 - 1', [1])
self.assert_query(source, 'cnt - 1 = 99', [1])
self.assert_query(source, '-cnt - 1 = -101', [1])
self.assert_query(source, '-(-cnt) = 100', [1])
self.assert_query(source, '-(cnt) = -(100)', [1])
self.assert_query(source, 'cnt + 1 = 101', [1])
self.assert_query(source, 'cnt = 1100 % 1000', [1])
self.assert_query(source, '"name" || \' \' || "name" = \'Orange Orange\'', [1])
self.assert_query(source, '"name" || \' \' || "cnt" = \'Orange 100\'', [1])
self.assert_query(source, '\'x\' || "name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(source, '\'x\' || "name" IS NULL', [5])
self.assert_query(source, 'cnt = 10 ^ 2', [1])
self.assert_query(source, '"name" ~ \'[OP]ra[gne]+\'', [1])
self.assert_query(source, '"name"="name2"', [2, 4]) # mix of matched and non-matched case sensitive names
self.assert_query(source, 'true', [1, 2, 3, 4, 5])
self.assert_query(source, 'false', [])
# Three value logic
self.assert_query(source, 'false and false', [])
self.assert_query(source, 'false and true', [])
self.assert_query(source, 'false and NULL', [])
self.assert_query(source, 'true and false', [])
self.assert_query(source, 'true and true', [1, 2, 3, 4, 5])
self.assert_query(source, 'true and NULL', [])
self.assert_query(source, 'NULL and false', [])
self.assert_query(source, 'NULL and true', [])
self.assert_query(source, 'NULL and NULL', [])
self.assert_query(source, 'false or false', [])
self.assert_query(source, 'false or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'false or NULL', [])
self.assert_query(source, 'true or false', [1, 2, 3, 4, 5])
self.assert_query(source, 'true or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'true or NULL', [1, 2, 3, 4, 5])
self.assert_query(source, 'NULL or false', [])
self.assert_query(source, 'NULL or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'NULL or NULL', [])
self.assert_query(source, 'not true', [])
self.assert_query(source, 'not false', [1, 2, 3, 4, 5])
self.assert_query(source, 'not null', [])
# not
self.assert_query(source, 'not name = \'Apple\'', [1, 3, 4])
self.assert_query(source, 'not name IS NULL', [1, 2, 3, 4])
self.assert_query(source, 'not name = \'Apple\' or name = \'Apple\'', [1, 2, 3, 4])
self.assert_query(source, 'not name = \'Apple\' or not name = \'Apple\'', [1, 3, 4])
self.assert_query(source, 'not name = \'Apple\' and pk = 4', [4])
self.assert_query(source, 'not name = \'Apple\' and not pk = 4', [1, 3])
self.assert_query(source, 'not pk IN (1, 2, 4, 8)', [3, 5])
# type conversion - QGIS expressions do not mind that we are comparing a string
# against numeric literals
self.assert_query(source, 'num_char IN (2, 4, 5)', [2, 4, 5])
# function
self.assert_query(source, 'sqrt(pk) >= 2', [4, 5])
self.assert_query(source, 'radians(cnt) < 2', [1, 5])
self.assert_query(source, 'degrees(pk) <= 200', [1, 2, 3])
self.assert_query(source, 'abs(cnt) <= 200', [1, 2, 5])
self.assert_query(source, 'cos(pk) < 0', [2, 3, 4])
self.assert_query(source, 'sin(pk) < 0', [4, 5])
self.assert_query(source, 'tan(pk) < 0', [2, 3, 5])
self.assert_query(source, 'acos(-1) < pk', [4, 5])
self.assert_query(source, 'asin(1) < pk', [2, 3, 4, 5])
self.assert_query(source, 'atan(3.14) < pk', [2, 3, 4, 5])
self.assert_query(source, 'atan2(3.14, pk) < 1', [3, 4, 5])
self.assert_query(source, 'exp(pk) < 10', [1, 2])
self.assert_query(source, 'ln(pk) <= 1', [1, 2])
self.assert_query(source, 'log(3, pk) <= 1', [1, 2, 3])
self.assert_query(source, 'log10(pk) < 0.5', [1, 2, 3])
self.assert_query(source, 'round(3.14) <= pk', [3, 4, 5])
self.assert_query(source, 'round(0.314,1) * 10 = pk', [3])
self.assert_query(source, 'floor(3.14) <= pk', [3, 4, 5])
self.assert_query(source, 'ceil(3.14) <= pk', [4, 5])
self.assert_query(source, 'pk < pi()', [1, 2, 3])
self.assert_query(source, 'round(cnt / 66.67) <= 2', [1, 5])
self.assert_query(source, 'floor(cnt / 66.67) <= 2', [1, 2, 5])
self.assert_query(source, 'ceil(cnt / 66.67) <= 2', [1, 5])
self.assert_query(source, 'pk < pi() / 2', [1])
self.assert_query(source, 'pk = char(51)', [3])
self.assert_query(source, 'pk = coalesce(NULL,3,4)', [3])
self.assert_query(source, 'lower(name) = \'apple\'', [2])
self.assert_query(source, 'upper(name) = \'APPLE\'', [2])
self.assert_query(source, 'name = trim(\' Apple \')', [2])
# geometry
# azimuth and touches tests are deactivated because they do not pass for WFS source
# self.assert_query(source, 'azimuth($geometry,geom_from_wkt( \'Point (-70 70)\')) < pi()', [1, 5])
self.assert_query(source, 'x($geometry) < -70', [1, 5])
self.assert_query(source, 'y($geometry) > 70', [2, 4, 5])
self.assert_query(source, 'xmin($geometry) < -70', [1, 5])
self.assert_query(source, 'ymin($geometry) > 70', [2, 4, 5])
self.assert_query(source, 'xmax($geometry) < -70', [1, 5])
self.assert_query(source, 'ymax($geometry) > 70', [2, 4, 5])
self.assert_query(source,
'disjoint($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
[4, 5])
self.assert_query(source,
'intersects($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
[1, 2])
# self.assert_query(source, 'touches($geometry,geom_from_wkt( \'Polygon ((-70.332 66.33, -65.32 66.33, -65.32 78.3, -70.332 78.3, -70.332 66.33))\'))', [1, 4])
self.assert_query(source,
'contains(geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'),$geometry)',
[1, 2])
self.assert_query(source, 'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7', [4, 5])
self.assert_query(source,
'intersects($geometry,geom_from_gml( \'<gml:Polygon srsName="EPSG:4326"><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>-72.2,66.1 -65.2,66.1 -65.2,72.0 -72.2,72.0 -72.2,66.1</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon>\'))',
[1, 2])
# datetime
if self.treat_datetime_as_string():
self.assert_query(source, '"dt" <= format_date(make_datetime(2020, 5, 4, 12, 13, 14), \'yyyy-MM-dd hh:mm:ss\')', [1, 5])
self.assert_query(source, '"dt" < format_date(make_date(2020, 5, 4), \'yyyy-MM-dd hh:mm:ss\')', [1])
self.assert_query(source, '"dt" = format_date(to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\'),\'yyyy-MM-dd hh:mm:ss\')', [5])
else:
self.assert_query(source, '"dt" <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 5])
self.assert_query(source, '"dt" < make_date(2020, 5, 4)', [1])
self.assert_query(source, '"dt" = to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\')', [5])
self.assert_query(source, '"date" <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 2, 5])
self.assert_query(source, '"date" >= make_date(2020, 5, 4)', [2, 4])
if not self.treat_date_as_datetime():
self.assert_query(source,
'"date" = to_date(\'www4ww5ww2020\',\'wwwdwwMwwyyyy\')',
[2])
else:
# TODO - we don't have any expression functions which can upgrade a date value to a datetime value!
pass
if not self.treat_time_as_string():
self.assert_query(source, '"time" >= make_time(12, 14, 14)', [2, 4])
self.assert_query(source, '"time" = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')', [1])
else:
self.assert_query(source, 'to_time("time") >= make_time(12, 14, 14)', [2, 4])
self.assert_query(source, 'to_time("time") = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')', [1])
# TODO - enable, but needs fixing on Travis due to timezone handling issues
# if self.treat_datetime_as_string():
# self.assert_query(source, 'to_datetime("dt", \'yyyy-MM-dd hh:mm:ss\') + make_interval(days:=1) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1])
# self.assert_query(source, 'to_datetime("dt", \'yyyy-MM-dd hh:mm:ss\') + make_interval(days:=0.01) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 5])
# else:
# self.assert_query(source, '"dt" + make_interval(days:=1) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1])
# self.assert_query(source, '"dt" + make_interval(days:=0.01) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 5])
# combination of an uncompilable expression and limit
# TODO - move this test to FeatureSourceTestCase
# it's currently added in ProviderTestCase, but tests only using a QgsVectorLayer getting features,
# i.e. not directly requesting features from the provider. Turns out the WFS provider fails this
# and should be fixed - then we can enable this test at the FeatureSourceTestCase level
# feature = next(self.source.getFeatures(QgsFeatureRequest().setFilterExpression('pk=4')))
# context = QgsExpressionContext()
# scope = QgsExpressionContextScope()
# scope.setVariable('parent', feature)
# context.appendScope(scope)
# request = QgsFeatureRequest()
# request.setExpressionContext(context)
# request.setFilterExpression('"pk" = attribute(@parent, \'pk\')')
# request.setLimit(1)
# values = [f['pk'] for f in self.source.getFeatures(request)]
# self.assertEqual(values, [4])
def testGetFeaturesExp(self):
self.runGetFeatureTests(self.source)
def runOrderByTests(self):
request = QgsFeatureRequest().addOrderBy('cnt')
values = [f['cnt'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [-200, 100, 200, 300, 400])
request = QgsFeatureRequest().addOrderBy('cnt', False)
values = [f['cnt'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [400, 300, 200, 100, -200])
request = QgsFeatureRequest().addOrderBy('name')
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Apple', 'Honey', 'Orange', 'Pear', NULL])
request = QgsFeatureRequest().addOrderBy('name', True, True)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [NULL, 'Apple', 'Honey', 'Orange', 'Pear'])
request = QgsFeatureRequest().addOrderBy('name', False)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [NULL, 'Pear', 'Orange', 'Honey', 'Apple'])
request = QgsFeatureRequest().addOrderBy('name', False, False)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Pear', 'Orange', 'Honey', 'Apple', NULL])
request = QgsFeatureRequest().addOrderBy('num_char', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
request = QgsFeatureRequest().addOrderBy('dt', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [3, 4, 2, 5, 1])
request = QgsFeatureRequest().addOrderBy('date', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [3, 4, 2, 1, 5])
request = QgsFeatureRequest().addOrderBy('time', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [3, 4, 2, 1, 5])
# Case sensitivity
request = QgsFeatureRequest().addOrderBy('name2')
values = [f['name2'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Apple', 'Honey', 'NuLl', 'oranGe', 'PEaR'])
# Combination with LIMIT
request = QgsFeatureRequest().addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# A slightly more complex expression
request = QgsFeatureRequest().addOrderBy('pk*2', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
# Order reversing expression
request = QgsFeatureRequest().addOrderBy('pk*-1', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [1, 2, 3, 4, 5])
# Type dependent expression
request = QgsFeatureRequest().addOrderBy('num_char*2', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
# Order by guaranteed to fail
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(set(values), set([5, 4, 3, 2, 1]))
# Multiple order bys and boolean
request = QgsFeatureRequest().addOrderBy('pk > 2').addOrderBy('pk', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [2, 1, 5, 4, 3])
# Multiple order bys, one bad, and a limit
request = QgsFeatureRequest().addOrderBy('pk', False).addOrderBy('not a valid expression*', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# Bad expression first
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False).addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# Combination with subset of attributes
request = QgsFeatureRequest().addOrderBy('num_char', False).setSubsetOfAttributes(['pk'], self.source.fields())
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
def testOrderBy(self):
self.runOrderByTests()
def testOpenIteratorAfterSourceRemoval(self):
"""
Test that removing source after opening an iterator does not crash. All required
information should be captured in the iterator's source and there MUST be no
        links between the iterators and the source's data source
"""
if not getattr(self, 'getSource', None):
return
source = self.getSource()
it = source.getFeatures()
del source
# get the features
pks = []
for f in it:
pks.append(f['pk'])
self.assertEqual(set(pks), {1, 2, 3, 4, 5})
def testGetFeaturesFidTests(self):
fids = [f.id() for f in self.source.getFeatures()]
assert len(fids) == 5, 'Expected 5 features, got {} instead'.format(len(fids))
for id in fids:
features = [f for f in self.source.getFeatures(QgsFeatureRequest().setFilterFid(id))]
self.assertEqual(len(features), 1)
feature = features[0]
self.assertTrue(feature.isValid())
result = [feature.id()]
expected = [id]
assert result == expected, 'Expected {} and got {} when testing for feature ID filter'.format(expected,
result)
# test that results match QgsFeatureRequest.acceptFeature
request = QgsFeatureRequest().setFilterFid(id)
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f.id() == id)
# bad features
it = self.source.getFeatures(QgsFeatureRequest().setFilterFid(-99999999))
feature = QgsFeature(5)
feature.setValid(False)
self.assertFalse(it.nextFeature(feature))
self.assertFalse(feature.isValid())
def testGetFeaturesFidsTests(self):
fids = [f.id() for f in self.source.getFeatures()]
self.assertEqual(len(fids), 5)
# empty list = no features
request = QgsFeatureRequest().setFilterFids([])
result = set([f.id() for f in self.source.getFeatures(request)])
self.assertFalse(result)
request = QgsFeatureRequest().setFilterFids([fids[0], fids[2]])
result = set([f.id() for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = set([fids[0], fids[2]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f.id() in expected)
result = set(
[f.id() for f in self.source.getFeatures(QgsFeatureRequest().setFilterFids([fids[1], fids[3], fids[4]]))])
expected = set([fids[1], fids[3], fids[4]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
# sources should ignore non-existent fids
result = set([f.id() for f in self.source.getFeatures(
QgsFeatureRequest().setFilterFids([-101, fids[1], -102, fids[3], -103, fids[4], -104]))])
expected = set([fids[1], fids[3], fids[4]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
result = set([f.id() for f in self.source.getFeatures(QgsFeatureRequest().setFilterFids([]))])
expected = set([])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
# Rewind mid-way
request = QgsFeatureRequest().setFilterFids([fids[1], fids[3], fids[4]])
feature_it = self.source.getFeatures(request)
feature = QgsFeature()
feature.setValid(True)
self.assertTrue(feature_it.nextFeature(feature))
self.assertIn(feature.id(), [fids[1], fids[3], fids[4]])
first_feature = feature
self.assertTrue(feature.isValid())
# rewind
self.assertTrue(feature_it.rewind())
self.assertTrue(feature_it.nextFeature(feature))
self.assertEqual(feature.id(), first_feature.id())
self.assertTrue(feature.isValid())
# grab all features
self.assertTrue(feature_it.nextFeature(feature))
self.assertTrue(feature_it.nextFeature(feature))
# none left
self.assertFalse(feature_it.nextFeature(feature))
self.assertFalse(feature.isValid())
def testGetFeaturesFilterRectTests(self):
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2, 4]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([2, 4]))
# test with an empty rectangle
extent = QgsRectangle()
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# ExactIntersection flag set, but no filter rect set. Should be ignored.
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.ExactIntersect)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
def testRectAndExpression(self):
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterExpression('"cnt">200').setFilterRect(extent)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(
set(expected), result)
self.assertTrue(all_valid)
# shouldn't matter what order this is done in
request = QgsFeatureRequest().setFilterRect(extent).setFilterExpression('"cnt">200')
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(
set(expected), result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def testGeomAndAllAttributes(self):
"""
Test combination of a filter which requires geometry and all attributes
"""
request = QgsFeatureRequest().setFilterExpression(
'attribute($currentfeature,\'cnt\')>200 and $x>=-70 and $x<=-60').setSubsetOfAttributes([]).setFlags(
QgsFeatureRequest.NoGeometry)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(result, {4})
self.assertTrue(all_valid)
request = QgsFeatureRequest().setFilterExpression(
'attribute($currentfeature,\'cnt\')>200 and $x>=-70 and $x<=-60')
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(result, {4})
self.assertTrue(all_valid)
def testRectAndFids(self):
"""
Test the combination of a filter rect along with filterfids
"""
# first get feature ids
ids = {f['pk']: f.id() for f in self.source.getFeatures()}
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterFids([ids[3], ids[4]]).setFilterRect(extent)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
            expected) == result, 'Expected {} and got {} when testing for combination of filterRect and filterFids'.format(
set(expected), result)
self.assertTrue(all_valid)
# shouldn't matter what order this is done in
request = QgsFeatureRequest().setFilterRect(extent).setFilterFids([ids[3], ids[4]])
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
            expected) == result, 'Expected {} and got {} when testing for combination of filterRect and filterFids'.format(
set(expected), result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def testGetFeaturesDestinationCrs(self):
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:3785'),
QgsProject.instance().transformContext())
features = {f['pk']: f for f in self.source.getFeatures(request)}
# test that features have been reprojected
self.assertAlmostEqual(features[1].geometry().constGet().x(), -7829322, -5)
self.assertAlmostEqual(features[1].geometry().constGet().y(), 9967753, -5)
self.assertAlmostEqual(features[2].geometry().constGet().x(), -7591989, -5)
self.assertAlmostEqual(features[2].geometry().constGet().y(), 11334232, -5)
self.assertFalse(features[3].hasGeometry())
self.assertAlmostEqual(features[4].geometry().constGet().x(), -7271389, -5)
self.assertAlmostEqual(features[4].geometry().constGet().y(), 14531322, -5)
self.assertAlmostEqual(features[5].geometry().constGet().x(), -7917376, -5)
self.assertAlmostEqual(features[5].geometry().constGet().y(), 14493008, -5)
# when destination crs is set, filter rect should be in destination crs
rect = QgsRectangle(-7650000, 10500000, -7200000, 15000000)
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:3785'),
QgsProject.instance().transformContext()).setFilterRect(rect)
features = {f['pk']: f for f in self.source.getFeatures(request)}
self.assertEqual(set(features.keys()), {2, 4})
# test that features have been reprojected
self.assertAlmostEqual(features[2].geometry().constGet().x(), -7591989, -5)
self.assertAlmostEqual(features[2].geometry().constGet().y(), 11334232, -5)
self.assertAlmostEqual(features[4].geometry().constGet().x(), -7271389, -5)
self.assertAlmostEqual(features[4].geometry().constGet().y(), 14531322, -5)
# bad rect for transform
rect = QgsRectangle(-99999999999, 99999999999, -99999999998, 99999999998)
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:28356'),
QgsProject.instance().transformContext()).setFilterRect(rect)
features = [f for f in self.source.getFeatures(request)]
self.assertFalse(features)
def testGetFeaturesLimit(self):
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2))
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features, got {} instead'.format(len(features))
        # the iterator is already exhausted after the limit, so fetching another feature must fail
feature = QgsFeature()
assert not it.nextFeature(feature), 'Expected no feature after limit, got one'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
it.rewind()
assert it.nextFeature(feature), 'Expected feature after rewind, got none'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
# test with expression, both with and without compilation
try:
self.disableCompiler()
except AttributeError:
pass
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(
features)
try:
self.enableCompiler()
except AttributeError:
pass
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(
features)
# limit to more features than exist
it = self.source.getFeatures(QgsFeatureRequest().setLimit(3).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(
features)
        # limit to fewer features than match the filter expression
it = self.source.getFeatures(QgsFeatureRequest().setLimit(1).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert 1 in features or 5 in features, 'Expected either 1 or 5 for expression and feature limit, Got {} instead'.format(
features)
def testClosedIterators(self):
""" Test behavior of closed iterators """
# Test retrieving feature after closing iterator
f_it = self.source.getFeatures(QgsFeatureRequest())
fet = QgsFeature()
assert f_it.nextFeature(fet), 'Could not fetch feature'
assert fet.isValid(), 'Feature is not valid'
assert f_it.close(), 'Could not close iterator'
self.assertFalse(f_it.nextFeature(fet),
'Fetched feature after iterator closed, expected nextFeature() to return False')
self.assertFalse(fet.isValid(), 'Valid feature fetched from closed iterator, should be invalid')
# Test rewinding closed iterator
self.assertFalse(f_it.rewind(), 'Rewinding closed iterator successful, should not be allowed')
def testGetFeaturesSubsetAttributes(self):
""" Test that expected results are returned when using subsets of attributes """
tests = {'pk': set([1, 2, 3, 4, 5]),
'cnt': set([-200, 300, 100, 200, 400]),
'name': set(['Pear', 'Orange', 'Apple', 'Honey', NULL]),
'name2': set(['NuLl', 'PEaR', 'oranGe', 'Apple', 'Honey']),
                 'dt': set([NULL,
                            '2021-05-04 13:13:14' if self.treat_datetime_as_string() else QDateTime(2021, 5, 4, 13, 13, 14),
                            '2020-05-04 12:14:14' if self.treat_datetime_as_string() else QDateTime(2020, 5, 4, 12, 14, 14),
                            '2020-05-04 12:13:14' if self.treat_datetime_as_string() else QDateTime(2020, 5, 4, 12, 13, 14),
                            '2020-05-03 12:13:14' if self.treat_datetime_as_string() else QDateTime(2020, 5, 3, 12, 13, 14)]),
'date': set([NULL,
'2020-05-02' if self.treat_date_as_string() else QDate(2020, 5, 2) if not self.treat_date_as_datetime() else QDateTime(2020, 5, 2, 0, 0, 0),
'2020-05-03' if self.treat_date_as_string() else QDate(2020, 5, 3) if not self.treat_date_as_datetime() else QDateTime(2020, 5, 3, 0, 0, 0),
'2020-05-04' if self.treat_date_as_string() else QDate(2020, 5, 4) if not self.treat_date_as_datetime() else QDateTime(2020, 5, 4, 0, 0, 0),
'2021-05-04' if self.treat_date_as_string() else QDate(2021, 5, 4) if not self.treat_date_as_datetime() else QDateTime(2021, 5, 4, 0, 0, 0)]),
'time': set([QTime(12, 13, 1) if not self.treat_time_as_string() else '12:13:01',
QTime(12, 14, 14) if not self.treat_time_as_string() else '12:14:14',
QTime(12, 13, 14) if not self.treat_time_as_string() else '12:13:14',
QTime(13, 13, 14) if not self.treat_time_as_string() else '13:13:14', NULL])}
for field, expected in list(tests.items()):
request = QgsFeatureRequest().setSubsetOfAttributes([field], self.source.fields())
result = set([f[field] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(result, expected, 'Expected {}, got {}'.format(expected, result))
self.assertTrue(all_valid)
def testGetFeaturesSubsetAttributes2(self):
""" Test that other fields are NULL when fetching subsets of attributes """
for field_to_fetch in ['pk', 'cnt', 'name', 'name2', 'dt', 'date', 'time']:
for f in self.source.getFeatures(
QgsFeatureRequest().setSubsetOfAttributes([field_to_fetch], self.source.fields())):
                # Check that all other fields are NULL (field names are compared in lower-case)
for other_field in [field.name() for field in self.source.fields() if
field.name().lower() != field_to_fetch]:
if other_field == 'pk' or other_field == 'PK':
# skip checking the primary key field, as it may be validly fetched by providers to use as feature id
continue
self.assertEqual(f[other_field], NULL,
'Value for field "{}" was present when it should not have been fetched by request'.format(
other_field))
def testGetFeaturesNoGeometry(self):
""" Test that no geometry is present when fetching features without geometry"""
for f in self.source.getFeatures(QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)):
self.assertFalse(f.hasGeometry(), 'Expected no geometry, got one')
self.assertTrue(f.isValid())
def testGetFeaturesWithGeometry(self):
""" Test that geometry is present when fetching features without setting NoGeometry flag"""
for f in self.source.getFeatures(QgsFeatureRequest()):
if f['pk'] == 3:
# no geometry for this feature
continue
assert f.hasGeometry(), 'Expected geometry, got none'
self.assertTrue(f.isValid())
def testUniqueValues(self):
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('cnt'))),
set([-200, 100, 200, 300, 400]))
assert set(['Apple', 'Honey', 'Orange', 'Pear', NULL]) == set(
self.source.uniqueValues(self.source.fields().lookupField('name'))), 'Got {}'.format(
set(self.source.uniqueValues(self.source.fields().lookupField('name'))))
if self.treat_datetime_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('dt'))),
set(['2021-05-04 13:13:14', '2020-05-04 12:14:14', '2020-05-04 12:13:14', '2020-05-03 12:13:14', NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('dt'))),
set([QDateTime(2021, 5, 4, 13, 13, 14), QDateTime(2020, 5, 4, 12, 14, 14), QDateTime(2020, 5, 4, 12, 13, 14), QDateTime(2020, 5, 3, 12, 13, 14), NULL]))
if self.treat_date_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set(['2020-05-03', '2020-05-04', '2021-05-04', '2020-05-02', NULL]))
elif self.treat_date_as_datetime():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set([QDateTime(2020, 5, 3, 0, 0, 0), QDateTime(2020, 5, 4, 0, 0, 0), QDateTime(2021, 5, 4, 0, 0, 0), QDateTime(2020, 5, 2, 0, 0, 0), NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set([QDate(2020, 5, 3), QDate(2020, 5, 4), QDate(2021, 5, 4), QDate(2020, 5, 2), NULL]))
if self.treat_time_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('time'))),
set(['12:14:14', '13:13:14', '12:13:14', '12:13:01', NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('time'))),
set([QTime(12, 14, 14), QTime(13, 13, 14), QTime(12, 13, 14), QTime(12, 13, 1), NULL]))
def testMinimumValue(self):
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('cnt')), -200)
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('name')), 'Apple')
if self.treat_datetime_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('dt')), '2020-05-03 12:13:14')
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('dt')), QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)))
if self.treat_date_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), '2020-05-02')
elif not self.treat_date_as_datetime():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), QDate(2020, 5, 2))
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), QDateTime(2020, 5, 2, 0, 0, 0))
if not self.treat_time_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('time')), QTime(12, 13, 1))
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('time')), '12:13:01')
def testMaximumValue(self):
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('cnt')), 400)
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('name')), 'Pear')
if not self.treat_datetime_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('dt')), QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('dt')), '2021-05-04 13:13:14')
if self.treat_date_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), '2021-05-04')
elif not self.treat_date_as_datetime():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), QDate(2021, 5, 4))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), QDateTime(2021, 5, 4, 0, 0, 0))
if not self.treat_time_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('time')), QTime(13, 13, 14))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('time')), '13:13:14')
def testAllFeatureIds(self):
ids = set([f.id() for f in self.source.getFeatures()])
self.assertEqual(set(self.source.allFeatureIds()), ids)
def testSubsetOfAttributesWithFilterExprWithNonExistingColumn(self):
""" Test fix for https://github.com/qgis/QGIS/issues/33878 """
request = QgsFeatureRequest().setSubsetOfAttributes([0])
request.setFilterExpression("non_existing = 1")
features = [f for f in self.source.getFeatures(request)]
self.assertEqual(len(features), 0)
| gpl-2.0 | -4,131,022,380,989,090,300 | 55.609375 | 434 | 0.595718 | false |
thispc/download-manager | module/plugins/accounts/ZeveraCom.py | 7 | 2379 | # -*- coding: utf-8 -*-
import time
from ..internal.MultiAccount import MultiAccount
class ZeveraCom(MultiAccount):
__name__ = "ZeveraCom"
__type__ = "account"
__version__ = "0.36"
__status__ = "testing"
__config__ = [("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
("mh_list", "str", "Hoster list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12)]
__description__ = """Zevera.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]"),
("Walter Purcaro", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
API_URL = "http://api.zevera.com/jDownloader.ashx"
def api_response(self, method, user, password=None, **kwargs):
get_data = {'cmd': method,
'login': user,
'pass': password}
get_data.update(kwargs)
res = self.load(self.API_URL,
get=get_data)
self.log_debug(res)
if ':' in res:
res = res.replace(',', '\n')
return dict((y.strip().lower(), z.strip()) for (y, z) in
[x.split(':', 1) for x in res.splitlines() if ':' in x])
else:
return res
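    # NOTE (illustrative assumption only): judging from the parsing above and
    # the keys used in grab_info() below, a successful "accountinfo" reply is
    # assumed to look like comma-separated "key: value" pairs, e.g.
    #   endsubscriptiondate: 2020/05/04 12:00:00, availabletodaytraffic: 1024, orondaytrafficlimit: 1
    # which api_response() turns into a lower-cased {key: value} dict.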
def grab_hosters(self, user, password, data):
res = self.api_response("gethosters", user, password)
return [x.strip() for x in res.split(',')]
def grab_info(self, user, password, data):
validuntil = None
trafficleft = None
premium = False
res = self.api_response("accountinfo", user, password)
if "No trafic" not in res:
if res['endsubscriptiondate'] == "Expired!":
validuntil = time.time()
else:
validuntil = time.mktime(time.strptime(res['endsubscriptiondate'], "%Y/%m/%d %H:%M:%S"))
trafficleft = float(res['availabletodaytraffic']) * 1024 if res['orondaytrafficlimit'] != '0' else -1
premium = True
return {'validuntil': validuntil,
'trafficleft': trafficleft,
'premium': premium}
def signin(self, user, password, data):
if self.api_response("accountinfo", user, password) == "No trafic":
self.fail_login()
| gpl-3.0 | -8,852,935,636,855,944,000 | 32.041667 | 117 | 0.527533 | false |
gabeharms/firestorm | indra/viewer_components/updater/scripts/darwin/janitor.py | 2 | 4649 | #!/usr/bin/python
"""\
@file janitor.py
@author Nat Goodspeed
@date 2011-09-14
@brief Janitor class to clean up arbitrary resources
2013-01-04 cloned from vita because it's exactly what update_install.py needs.
$LicenseInfo:firstyear=2011&license=viewerlgpl$
Copyright (c) 2011, Linden Research, Inc.
$/LicenseInfo$
"""
import sys
import functools
import itertools
class Janitor(object):
"""
Usage:
Basic:
self.janitor = Janitor(sys.stdout) # report cleanup actions on stdout
...
self.janitor.later(os.remove, some_temp_file)
self.janitor.later(os.remove, some_other_file)
...
self.janitor.cleanup() # perform cleanup actions
Context Manager:
with Janitor() as janitor: # clean up quietly
...
janitor.later(shutil.rmtree, some_temp_directory)
...
# exiting 'with' block performs cleanup
Test Class:
class TestMySoftware(unittest.TestCase, Janitor):
def __init__(self):
Janitor.__init__(self) # quiet cleanup
...
def setUp(self):
...
self.later(os.rename, saved_file, original_location)
...
def tearDown(self):
Janitor.tearDown(self) # calls cleanup()
...
# Or, if you have no other tearDown() logic for
# TestMySoftware, you can omit the TestMySoftware.tearDown()
# def entirely and let it inherit Janitor.tearDown().
"""
def __init__(self, stream=None):
"""
If you pass stream= (e.g.) sys.stdout or sys.stderr, Janitor will
report its cleanup operations as it performs them. If you don't, it
will perform them quietly -- unless one or more of the actions throws
an exception, in which case you'll get output on stderr.
"""
self.stream = stream
self.cleanups = []
def later(self, func, *args, **kwds):
"""
Pass the callable you want to call at cleanup() time, plus any
positional or keyword args you want to pass it.
"""
# Get a name string for 'func'
try:
# A free function has a __name__
name = func.__name__
except AttributeError:
try:
                # A class object (even builtin objects like ints!) supports
# __class__.__name__
name = func.__class__.__name__
except AttributeError:
# Shrug! Just use repr() to get a string describing this func.
name = repr(func)
# Construct a description of this operation in Python syntax from
# args, kwds.
desc = "%s(%s)" % \
(name, ", ".join(itertools.chain((repr(a) for a in args),
("%s=%r" % (k, v) for (k, v) in kwds.iteritems()))))
# Use functools.partial() to bind passed args and keywords to the
# passed func so we get a nullary callable that does what caller
# wants.
bound = functools.partial(func, *args, **kwds)
self.cleanups.append((desc, bound))
def cleanup(self):
"""
Perform all the actions saved with later() calls.
"""
# Typically one allocates resource A, then allocates resource B that
# depends on it. In such a scenario it's appropriate to delete B
# before A -- so perform cleanup actions in reverse order. (This is
# the same strategy used by atexit().)
while self.cleanups:
# Until our list is empty, pop the last pair.
desc, bound = self.cleanups.pop(-1)
# If requested, report the action.
if self.stream is not None:
print >>self.stream, desc
try:
# Call the bound callable
bound()
except Exception, err:
# This is cleanup. Report the problem but continue.
print >>(self.stream or sys.stderr), "Calling %s\nraised %s: %s" % \
(desc, err.__class__.__name__, err)
def tearDown(self):
"""
If a unittest.TestCase subclass (or a nose test class) adds Janitor as
one of its base classes, and has no other tearDown() logic, let it
inherit Janitor.tearDown().
"""
self.cleanup()
def __enter__(self):
return self
def __exit__(self, type, value, tb):
# Perform cleanup no matter how we exit this 'with' statement
self.cleanup()
# Propagate any exception from the 'with' statement, don't swallow it
return False
| lgpl-2.1 | -976,991,545,134,254,300 | 33.954887 | 100 | 0.568294 | false |
voicesauce/opensauce-python | tools/generate_snack_samples.py | 1 | 1936 | # Script to generate raw Snack samples from test wav files
# The data is used for comparison in unit tests
# Licensed under Apache v2 (see LICENSE)
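#
# Expected invocation (inferred from main() at the bottom of this script,
# which takes the wav directory and the output directory as positional
# arguments):
#
#     python generate_snack_samples.py <wav_dir> <out_dir>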
import sys
import os
import glob
import numpy as np
from opensauce.snack import snack_raw_pitch, snack_raw_formants, sformant_names
def save_samples(data, fn, col_name, sample, out_dir):
"""Dump data in txt format using fn, col_name, and sample strings
in file name
"""
fn = os.path.splitext(os.path.basename(fn))[0]
fn = '-'.join(('sample', fn, col_name, sample))
fn = os.path.join(out_dir, fn) + '.txt'
np.savetxt(fn, data)
def main(wav_dir, out_dir):
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
# Generate Snack data for each wav file and save it to text files
method = 'tcl'
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# Generate raw Snack pitch samples
# Use VoiceSauce default parameter values
F0_raw, V_raw = snack_raw_pitch(wav_file, method, frame_shift=1, window_size=25, max_pitch=500, min_pitch=40)
# Save raw Snack pitch samples
wav_basename = os.path.basename(wav_file)
# Save F0 and V data to separate text files
save_samples(F0_raw, wav_basename, 'sF0', '1ms', out_dir)
save_samples(V_raw, wav_basename, 'sV', '1ms', out_dir)
# Generate raw Snack formant samples
# Use VoiceSauce default parameter values
estimates_raw = snack_raw_formants(wav_file, method, frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12)
# Save raw Snack formant samples
wav_basename = os.path.basename(wav_file)
# Save data to separate text files
for n in sformant_names:
save_samples(estimates_raw[n], wav_basename, n, '1ms', out_dir)
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
| apache-2.0 | 4,129,909,481,922,300,000 | 35.528302 | 124 | 0.651343 | false |
Yuudachimoe/HikariChun-RedBot | lib/aiohttp/abc.py | 19 | 2147 | import asyncio
import sys
from abc import ABC, abstractmethod
from collections.abc import Iterable, Sized
PY_35 = sys.version_info >= (3, 5)
class AbstractRouter(ABC):
@asyncio.coroutine # pragma: no branch
@abstractmethod
def resolve(self, request):
"""Return MATCH_INFO for given request"""
class AbstractMatchInfo(ABC):
@asyncio.coroutine # pragma: no branch
@abstractmethod
def handler(self, request):
"""Execute matched request handler"""
@asyncio.coroutine # pragma: no branch
@abstractmethod
def expect_handler(self, request):
"""Expect handler for 100-continue processing"""
@property # pragma: no branch
@abstractmethod
def http_exception(self):
"""HTTPException instance raised on router's resolving, or None"""
@abstractmethod # pragma: no branch
def get_info(self):
"""Return a dict with additional info useful for introspection"""
class AbstractView(ABC):
def __init__(self, request):
self._request = request
@property
def request(self):
return self._request
@asyncio.coroutine # pragma: no branch
@abstractmethod
def __iter__(self):
while False: # pragma: no cover
yield None
if PY_35: # pragma: no branch
@abstractmethod
def __await__(self):
return # pragma: no cover
class AbstractResolver(ABC):
@asyncio.coroutine # pragma: no branch
@abstractmethod
def resolve(self, hostname):
"""Return IP address for given hostname"""
@asyncio.coroutine # pragma: no branch
@abstractmethod
def close(self):
"""Release resolver"""
class AbstractCookieJar(Sized, Iterable):
def __init__(self, *, loop=None):
self._loop = loop or asyncio.get_event_loop()
@abstractmethod
def clear(self):
"""Clear all cookies."""
@abstractmethod
def update_cookies(self, cookies, response_url=None):
"""Update cookies."""
@abstractmethod
def filter_cookies(self, request_url):
"""Return the jar's cookies filtered by their attributes."""
| gpl-3.0 | -9,145,994,920,863,235,000 | 23.397727 | 74 | 0.64136 | false |
peterfpeterson/mantid | Testing/SystemTests/tests/framework/GSASIIRefineFitPeaksTest.py | 3 | 5521 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from abc import ABCMeta, abstractmethod
import os
import re
import mantid
import site
import systemtesting
import tempfile
from mantid.simpleapi import GSASIIRefineFitPeaks, Load
class _AbstractGSASIIRefineFitPeaksTest(systemtesting.MantidSystemTest):
__metaclass__ = ABCMeta
fitted_peaks_ws = None
gamma = None
input_ws = None
rwp = None
sigma = None
lattice_params_table = None
_FITTED_PEAKS_WS_NAME = "FittedPeaks"
_LATTICE_PARAM_TBL_NAME = "LatticeParameters"
_INPUT_WORKSPACE_FILENAME = "ENGINX_280625_focused_bank_1.nxs"
_PHASE_FILENAME_1 = "Fe-gamma.cif"
_PHASE_FILENAME_2 = "Fe-alpha.cif"
_INST_PARAM_FILENAME = "template_ENGINX_241391_236516_North_bank.prm"
_TEMP_DIR = tempfile.gettempdir()
_path_to_gsas = None
@abstractmethod
def _get_expected_rwp(self):
pass
@abstractmethod
def _get_fit_params_reference_filename(self):
pass
@abstractmethod
def _get_fitted_peaks_reference_filename(self):
pass
@abstractmethod
def _get_gsas_proj_filename(self):
pass
@abstractmethod
def _get_refinement_method(self):
pass
def cleanup(self):
mantid.mtd.clear()
self.remove_all_gsas_files(gsas_filename_without_extension=self._get_gsas_proj_filename().split(".")[0])
def excludeInPullRequests(self):
return True
def input_ws_path(self):
return mantid.FileFinder.getFullPath(self._INPUT_WORKSPACE_FILENAME)
def inst_param_file_path(self):
return mantid.FileFinder.getFullPath(self._INST_PARAM_FILENAME)
def path_to_gsas(self):
if self._path_to_gsas is None:
gsas_location = os.path.join(site.USER_SITE, "g2conda", "GSASII")
if os.path.isdir(gsas_location):
self._path_to_gsas = os.path.join(site.USER_SITE, "g2conda", "GSASII")
else:
self._path_to_gsas = ""
return self._path_to_gsas
def phase_file_paths(self):
return mantid.FileFinder.getFullPath(self._PHASE_FILENAME_1) + "," + \
mantid.FileFinder.getFullPath(self._PHASE_FILENAME_2)
def remove_all_gsas_files(self, gsas_filename_without_extension):
for filename in os.listdir(self._TEMP_DIR):
if re.search(gsas_filename_without_extension, filename):
os.remove(os.path.join(self._TEMP_DIR, filename))
def runTest(self):
self.input_ws = Load(Filename=self.input_ws_path(), OutputWorkspace="input_ws")
gsas_path = self.path_to_gsas()
if not gsas_path:
self.fail("Could not find GSAS-II installation")
self.fitted_peaks_ws, self.lattice_params_table, self.rwp, self.sigma, self.gamma = \
GSASIIRefineFitPeaks(RefinementMethod=self._get_refinement_method(),
OutputWorkspace=self._FITTED_PEAKS_WS_NAME,
InputWorkspace=self.input_ws,
PhaseInfoFiles=self.phase_file_paths(),
InstrumentFile=self.inst_param_file_path(),
PathToGSASII=gsas_path,
SaveGSASIIProjectFile=self._get_gsas_proj_filename(),
MuteGSASII=True,
XMin=10000, XMax=40000,
LatticeParameters=self._LATTICE_PARAM_TBL_NAME,
RefineSigma=True, RefineGamma=True)
def skipTests(self):
# Skip this test, as it's just a wrapper for the Rietveld and Pawley tests
return True
def validate(self):
self.tolerance = 1e-4
self.assertAlmostEqual(self.rwp, self._get_expected_rwp(), delta=1e-5)
return (self._LATTICE_PARAM_TBL_NAME, mantid.FileFinder.getFullPath(self._get_fit_params_reference_filename()),
self._FITTED_PEAKS_WS_NAME, mantid.FileFinder.getFullPath(self._get_fitted_peaks_reference_filename()))
class GSASIIRefineFitPeaksRietveldTest(_AbstractGSASIIRefineFitPeaksTest):
def skipTests(self):
return not self.path_to_gsas()
def _get_expected_rwp(self):
return 39.09515
def _get_fit_params_reference_filename(self):
return "GSASIIRefineFitPeaksRietveldFitParams.nxs"
def _get_fitted_peaks_reference_filename(self):
return "GSASIIRefineFitPeaksRietveldFittedPeaks.nxs"
def _get_gsas_proj_filename(self):
return "GSASIIRefineFitPeaksRietveldTest.gpx"
def _get_refinement_method(self):
return "Rietveld refinement"
class GSASIIRefineFitPeaksPawleyTest(_AbstractGSASIIRefineFitPeaksTest):
def skipTests(self):
return not self.path_to_gsas()
def _get_expected_rwp(self):
return 35.02589
def _get_fit_params_reference_filename(self):
return "GSASIIRefineFitPeaksPawleyFitParams.nxs"
def _get_fitted_peaks_reference_filename(self):
return "GSASIIRefineFitPeaksPawleyFittedPeaks.nxs"
def _get_gsas_proj_filename(self):
return "GSASIIRefineFitPeaksPawleyTest.gpx"
def _get_refinement_method(self):
return "Pawley refinement"
| gpl-3.0 | -4,977,549,474,038,801,000 | 33.72327 | 119 | 0.646984 | false |
CINPLA/expipe-dev | phy-contrib/phycontrib/utils/precache.py | 1 | 1186 | # -*- coding: utf-8 -*-
"""Precache plugin."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
from tqdm import tqdm
from phy import IPlugin
#------------------------------------------------------------------------------
# Plugin
#------------------------------------------------------------------------------
class PrecachePlugin(IPlugin):
def attach_to_controller(self, controller):
# Skip if the cache has already been created.
if op.exists(op.join(controller.cache_dir, 'done')):
return
s = controller.supervisor
@controller.connect
def on_gui_ready(gui):
# Create the cache.
for clu in tqdm(s.clustering.cluster_ids.tolist(),
desc="Precaching data",
leave=True,
):
s.select([clu])
s.select([])
# Mark the cache as complete.
with open(op.join(controller.cache_dir, 'done'), 'w') as f:
f.write('')
| gpl-3.0 | 3,707,451,821,832,769,000 | 29.410256 | 79 | 0.379427 | false |
ytsarev/rally | rally/openstack/common/db/sqlalchemy/session.py | 1 | 35132 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.
Examples:
.. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code:: python
def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
   exception while using ``with session.begin``. Here we create two instances
   with the same primary key; the exception must be caught outside of the
   context managed by a single session:
.. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code:: python
def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
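  As an illustration only (a hypothetical ``get_foo_for_update()`` that reuses
  the placeholder ``model_query``/``models.Foo`` names from the examples above,
  not a drop-in implementation), the SELECT-then-lock idea could look like:
  .. code:: python
    def get_foo_for_update(context, foo):
        session = sessionmaker()
        with session.begin():
            query = (model_query(context, models.Foo, session).
                     filter_by(foo=foo))
            # Plain SELECT first, so a non-matching query does not leave
            # a gap-lock behind.
            if query.count() != 1:
                raise Exception("foo=%s did not match exactly one row" % foo)
            return query.with_lockmode('update').first()
  The longer-term ``INSERT .. ON DUPLICATE KEY UPDATE`` form is plain MySQL
  SQL (e.g. ``INSERT INTO foo (id, foo) VALUES (1, 'x') ON DUPLICATE KEY
  UPDATE foo = 'x'``) and depends on the UNIQUE constraints mentioned above.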
Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
* In almost all cases you should use `query.soft_delete()`. Some examples:
.. code:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
.. code:: python
def soft_delete_bar_model():
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
def soft_delete_multi_models():
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import logging
import re
import time
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from rally.openstack.common.db import exception
from rally.openstack.common.gettextutils import _LE, _LW
from rally.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
    A DBDuplicateEntry exception will be raised if the integrity error
    wraps a unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                   where `t` is the table name and `c1`, `c2` are
        #                   the columns in the UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
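    # Worked examples (derived from the parsing above):
    #   get_columns_from_uniq_cons_or_name("uniq_instances0host0name")
    #       returns ["host", "name"]
    #   get_columns_from_uniq_cons_or_name("users_c1_key")   # postgresql default naming
    #       returns ["c1"]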
if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
for pattern in _DUP_KEY_RE_DB[engine_name]:
match = pattern.match(integrity_error.message)
if match:
break
else:
return
# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
columns = ''
if engine_name != 'ibm_db_sa':
columns = match.group(1)
if engine_name == "sqlite":
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
@functools.wraps(f)
def _wrap(self, *args, **kwargs):
try:
assert issubclass(
self.__class__, sqlalchemy.orm.session.Session
), ('_wrap_db_error() can only be applied to methods of '
'subclasses of sqlalchemy.orm.session.Session.')
return f(self, *args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
except sqla_exc.OperationalError as e:
_raise_if_db_connection_lost(e, self.bind)
_raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also, some tables (for example
            # instance_types) have more than one unique constraint. This
            # means we should get the names of the columns whose values
            # violate the unique constraint from the error message.
_raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
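# NOTE: _wrap_db_error() is meant to decorate methods of a subclass of
# sqlalchemy.orm.session.Session (the assert inside _wrap enforces this).
# A hypothetical application, for illustration only, would be:
#
#     class Session(sqlalchemy.orm.session.Session):
#         @_wrap_db_error
#         def query(self, *args, **kwargs):
#             return super(Session, self).query(*args, **kwargs)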
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
cursor = dbapi_conn.cursor()
try:
ping_sql = 'select 1'
if engine.name == 'ibm_db_sa':
# DB2 requires a table expression
ping_sql = 'select 1 from (values (1)) AS t1'
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg)
# if the database server has gone away, all connections in the pool
# have become invalid and we can safely close all of them here,
            # rather than waste time checking every single connection
engine.dispose()
# this will be handled by SQLAlchemy and will force it to create
# a new connection and retry the original action
raise sqla_exc.DisconnectionError(msg)
else:
raise
def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
"""Set the sql_mode session variable.
MySQL supports several server modes. The default is None, but sessions
may choose to enable server modes like TRADITIONAL, ANSI,
several STRICT_* modes and others.
Note: passing in '' (empty string) for sql_mode clears
the SQL mode for the session, overriding a potentially set
server default.
"""
cursor = dbapi_con.cursor()
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
def _mysql_get_effective_sql_mode(engine):
"""Returns the effective SQL mode for connections from the engine pool.
Returns ``None`` if the mode isn't available, otherwise returns the mode.
"""
# Get the real effective SQL mode. Even when unset by
# our own config, the server may still be operating in a specific
# SQL mode as set by the server configuration.
# Also note that the checkout listener will be called on execute to
# set the mode if it's registered.
row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
if row is None:
return
return row[1]
def _mysql_check_effective_sql_mode(engine):
"""Logs a message based on the effective SQL mode for MySQL connections."""
realmode = _mysql_get_effective_sql_mode(engine)
if realmode is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
return
LOG.debug('MySQL server mode set to %s', realmode)
# 'TRADITIONAL' mode enables several other modes, so
# we need a substring match here
if not ('TRADITIONAL' in realmode.upper() or
'STRICT_ALL_TABLES' in realmode.upper()):
LOG.warning(_LW("MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
realmode)
def _mysql_set_mode_callback(engine, sql_mode):
if sql_mode is not None:
mode_callback = functools.partial(_set_session_sql_mode,
sql_mode=sql_mode)
sqlalchemy.event.listen(engine, 'connect', mode_callback)
_mysql_check_effective_sql_mode(engine)
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def _raise_if_db_connection_lost(error, engine):
# NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires connection and cursor in incoming parameters,
# but we have no possibility to create connection if DB
# is not available, so in such case reconnect fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass to function None as placeholder
# instead of connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine."""
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": idle_timeout,
'convert_unicode': True,
}
logger = logging.getLogger('sqlalchemy.engine')
# Map SQL debug level to Python log level
if connection_debug >= 100:
logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if max_pool_size is not None:
engine_args['pool_size'] = max_pool_size
if max_overflow is not None:
engine_args['max_overflow'] = max_overflow
if pool_timeout is not None:
engine_args['pool_timeout'] = pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if engine.name == 'mysql':
if mysql_sql_mode:
_mysql_set_mode_callback(engine, mysql_sql_mode)
elif 'sqlite' in connection_dict.drivername:
if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
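# Illustrative sketch (not part of the original module; the connection URL is a
# placeholder): create_engine() and get_maker() are typically combined to
# produce Session instances bound to a single engine, e.g.
#
#     engine = create_engine('sqlite://')
#     maker = get_maker(engine)
#     session = maker()
#     session.execute('SELECT 1')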
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for filename, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if filename.endswith('session.py') and method == '_do_query':
continue
if filename.endswith('api.py') and method == 'wrapper':
continue
if filename.endswith('utils.py') and method == '_inner':
continue
if filename.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if filename.endswith('db/api.py'):
continue
# only trace inside rally
index = filename.rfind('rally')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (filename[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
class EngineFacade(object):
"""A helper class for removing of global engine instances from rally.db.
As a library, rally.db can't decide where to store/when to create engine
and sessionmaker instances, so this must be left for a target application.
On the other hand, in order to simplify the adoption of rally.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
    be global), but they will be stored in the app context, rather than in the
rally.db context.
    Note: using this helper is completely optional and you are encouraged to
integrate engine/sessionmaker instances into your apps any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
"""
def __init__(self, sql_connection,
sqlite_fk=False, autocommit=True,
expire_on_commit=False, **kwargs):
"""Initialize engine and sessionmaker instances.
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
Keyword arguments:
:keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
(defaults to TRADITIONAL)
:keyword idle_timeout: timeout before idle sql connections are reaped
(defaults to 3600)
:keyword connection_debug: verbosity of SQL debugging information.
0=None, 100=Everything (defaults to 0)
:keyword max_pool_size: maximum number of SQL connections to keep open
in a pool (defaults to SQLAlchemy settings)
:keyword max_overflow: if set, use this value for max_overflow with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword pool_timeout: if set, use this value for pool_timeout with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
(defaults to True)
:keyword connection_trace: add python stack traces to SQL as comment
strings (defaults to False)
:keyword max_retries: maximum db connection retries during startup.
(setting -1 implies an infinite retry count)
(defaults to 10)
:keyword retry_interval: interval between retries of opening a sql
connection (defaults to 10)
"""
super(EngineFacade, self).__init__()
self._engine = create_engine(
sql_connection=sql_connection,
sqlite_fk=sqlite_fk,
mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
idle_timeout=kwargs.get('idle_timeout', 3600),
connection_debug=kwargs.get('connection_debug', 0),
max_pool_size=kwargs.get('max_pool_size'),
max_overflow=kwargs.get('max_overflow'),
pool_timeout=kwargs.get('pool_timeout'),
sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
connection_trace=kwargs.get('connection_trace', False),
max_retries=kwargs.get('max_retries', 10),
retry_interval=kwargs.get('retry_interval', 10))
self._session_maker = get_maker(
engine=self._engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
def get_engine(self):
"""Get the engine instance (note, that it's shared)."""
return self._engine
def get_session(self, **kwargs):
"""Get a Session instance.
If passed, keyword arguments values override the ones used when the
sessionmaker instance was created.
:keyword autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:keyword expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
        # Iterate over a copy of the keys so unsupported keyword arguments
        # can be deleted safely while iterating.
        for arg in list(kwargs):
            if arg not in ('autocommit', 'expire_on_commit'):
                del kwargs[arg]
return self._session_maker(**kwargs)
@classmethod
def from_config(cls, connection_string, conf,
sqlite_fk=False, autocommit=True, expire_on_commit=False):
"""Initialize EngineFacade using oslo.config config instance options.
:param connection_string: SQLAlchemy connection string
:type connection_string: string
:param conf: oslo.config config instance
:type conf: oslo.config.cfg.ConfigOpts
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
return cls(sql_connection=connection_string,
sqlite_fk=sqlite_fk,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
**dict(conf.database.items()))
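# Illustrative sketch (assumed usage, not from the original file; the
# connection string is a placeholder):
#
#     facade = EngineFacade('sqlite://', autocommit=True)
#     engine = facade.get_engine()
#     session = facade.get_session(expire_on_commit=True)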
| apache-2.0 | -2,689,039,843,084,943,400 | 37.862832 | 79 | 0.635261 | false |
LPM-HMS/COSMOS-2.0 | cosmos/job/drm/drm_slurm.py | 2 | 6911 | import datetime
import os
import re
import subprocess as sp
from pprint import pformat
from cosmos import TaskStatus
from cosmos.job.drm.DRM_Base import DRM
from cosmos.job.drm.util import convert_size_to_kb, div, exit_process_group, run_cli_cmd
from cosmos.util.retry import retry_call
from more_itertools import grouper
FAILED_STATES = [
"BOOT_FAIL",
"CANCELLED",
"FAILED",
"PREEMPTED",
"REVOKED",
"TIMEOUT",
"CANCELLED by 0",
]
PENDING_STATES = [
"PENDING",
"CONFIGURING",
"COMPLETING",
"RUNNING",
"NODE_FAIL",
"RESIZING",
"SUSPENDED",
]
COMPLETED_STATES = [
"COMPLETED",
]
def parse_slurm_time(s, default=0):
"""
>>> parse_slurm_time('03:53:03') / 60 / 60
3.8841666666666668
>>> parse_slurm_time('24-02:40:+') / 60 / 60
578.6666666666666
>>> parse_slurm_time('06:20:01') / 60 / 60
6.333611111111111
>>> parse_slurm_time('2-03:19:54') / 60 / 60
51.33166666666667
"""
if s.strip() == "":
return default
p = s.split("-")
if len(p) == 2:
days = p[0]
time = p[1]
elif len(p) == 1:
days = 0
time = p[0]
else:
raise AssertionError("impossible")
hours, mins, secs = time.split(":")
if secs == "+":
secs = 0
return int(days) * 24 * 60 * 60 + int(hours) * 60 * 60 + int(mins) * 60 + int(secs)
def parse_slurm_date(s):
return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
def sbatch(task):
ns = task.drm_native_specification if task.drm_native_specification else ""
cmd = (
[
"sbatch",
"-o",
os.path.abspath(task.output_stdout_path),
"-e",
os.path.abspath(task.output_stderr_path),
]
+ ns.split()
+ [task.output_command_script_path]
)
out, err, _ = run_cli_cmd(cmd, env=os.environ)
return str(re.search(r"job (\d+)", out).group(1))
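# Illustrative note (assumption, not part of the original file): with a native
# specification such as "--partition=debug", the command assembled above is
# roughly
#
#     ['sbatch', '-o', '/abs/stdout', '-e', '/abs/stderr',
#      '--partition=debug', '/abs/command_script.sh']
#
# and the job id is parsed from sbatch output, which typically looks like
# "Submitted batch job 12345".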
class DRM_SLURM(DRM):
name = "slurm"
poll_interval = 5
def submit_job(self, task):
if task.environment_variables is not None:
raise NotImplementedError
for p in [task.output_stdout_path, task.output_stderr_path]:
if os.path.exists(p):
os.unlink(p)
task.drm_jobID = retry_call(
sbatch, fargs=[task], delay=10, tries=10, backoff=2, max_delay=60, logger=task.log,
)
task.status = TaskStatus.submitted
def filter_is_done(self, tasks):
"""
        Yield (task, job_info) pairs of parsed Slurm metadata for each task that has finished.
"""
        # drm_jobID can be None if submission failed
job_ids = [t.drm_jobID for t in tasks if t.drm_jobID is not None]
if job_ids:
job_infos = retry_call(
do_sacct,
fargs=[job_ids],
delay=10,
tries=10,
backoff=2,
max_delay=60,
logger=tasks[0].workflow.log,
)
for task in tasks:
if task.drm_jobID in job_infos:
job_info = job_infos[task.drm_jobID]
if job_info["State"] in FAILED_STATES + COMPLETED_STATES:
job_info = parse_sacct(job_infos[task.drm_jobID], tasks[0].workflow.log)
yield task, job_info
else:
assert job_info["State"] in PENDING_STATES, (
"Invalid job state: `%s` for %s drm_job_id=%s"
% (job_info["State"], task, task.drm_jobID,)
)
def drm_statuses(self, tasks):
"""
:param tasks: tasks that have been submitted to the job manager
:returns: (dict) task.drm_jobID -> drm_status
"""
job_ids = [t.drm_jobID for t in tasks if t.drm_jobID is not None]
if job_ids:
job_infos = retry_call(
do_sacct,
fargs=[job_ids],
delay=10,
tries=10,
backoff=2,
max_delay=60,
logger=tasks[0].workflow.log,
)
def f(task):
return job_infos.get(task.drm_jobID, dict()).get("State", "UNK_JOB_STATE")
return {task.drm_jobID: f(task) for task in tasks}
else:
return {}
def kill(self, task):
"""Terminate a task."""
raise NotImplementedError
def kill_tasks(self, tasks):
for group in grouper(50, tasks):
group = [x for x in group if x is not None]
pids = [str(t.drm_jobID) for t in group]
sp.call(["scancel", "-Q"] + pids, preexec_fn=exit_process_group)
def do_sacct(job_ids):
    # there's a lag between when a job finishes and when sacct is available :(
cmd = (
"sacct --format="
'"State,JobID,CPUTime,MaxRSS,AveRSS,AveCPU,CPUTimeRAW,AveVMSize,MaxVMSize,Elapsed,ExitCode,Start,End" '
"-j %s -P" % ",".join(job_ids)
)
out, err, _ = run_cli_cmd(cmd, shell=True)
parts = out.strip().split("\n")
# job_id_to_job_info_dict
all_jobs = dict()
# first line is the header
keys = parts[0].split("|")
# second line is all dashes, ignore it
for line in parts[2:]:
values = line.split("|")
job_dict = dict(list(zip(keys, values)))
if "batch" in job_dict["JobID"]:
# slurm prints these .batch versions of jobids which have better information, overwrite
job_dict["JobID"] = job_dict["JobID"].replace(".batch", "")
all_jobs[job_dict["JobID"]] = job_dict
return all_jobs
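# Illustrative sketch (hypothetical values): with -P, sacct prints '|'-separated
# rows, so do_sacct() returns a dict keyed by job id, e.g.
#
#     {'12345': {'State': 'COMPLETED', 'JobID': '12345',
#                'Elapsed': '00:01:02', 'ExitCode': '0:0', ...}}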
def parse_sacct(job_info, log=None):
try:
job_info2 = job_info.copy()
if job_info2["State"] in FAILED_STATES + PENDING_STATES:
job_info2["exit_status"] = None
else:
job_info2["exit_status"] = int(job_info2["ExitCode"].split(":")[0])
job_info2["cpu_time"] = int(job_info2["CPUTimeRAW"])
job_info2["wall_time"] = parse_slurm_time(job_info2["Elapsed"])
job_info2["percent_cpu"] = div(float(job_info2["cpu_time"]), float(job_info2["wall_time"]))
job_info2["avg_rss_mem"] = (
convert_size_to_kb(job_info2["AveRSS"]) if job_info2["AveRSS"] != "" else None
)
job_info2["max_rss_mem"] = (
convert_size_to_kb(job_info2["MaxRSS"]) if job_info2["MaxRSS"] != "" else None
)
job_info2["avg_vms_mem"] = (
convert_size_to_kb(job_info2["AveVMSize"]) if job_info2["AveVMSize"] != "" else None
)
job_info2["max_vms_mem"] = (
convert_size_to_kb(job_info2["MaxVMSize"]) if job_info2["MaxVMSize"] != "" else None
)
except Exception as e:
if log:
            # Log the raw input; job_info2 may be only partially converted here.
            log.info("Error Parsing: %s" % pformat(job_info))
raise e
return job_info2
| gpl-3.0 | -5,378,785,879,452,457,000 | 29.311404 | 111 | 0.536536 | false |
mypaint/mypaint | tests/unported/memory_leak.py | 5 | 7796 | #!/usr/bin/env python
from __future__ import division, print_function
from time import time
import sys
import os
import gc
import numpy as np
os.chdir(os.path.dirname(__file__))
sys.path.insert(0, '..')
from lib import mypaintlib, tiledsurface, brush, document, command, helpers
import guicontrol
# loadtxt is known to leak memory, thus we run it only once
# http://projects.scipy.org/numpy/ticket/1356
painting30sec_events = np.loadtxt('painting30sec.dat')
LEAK_EXIT_CODE = 33
def mem():
gc.collect()
with open('/proc/self/statm') as statm:
return int(statm.read().split()[0])
def check_garbage(msg='uncollectable garbage left over from previous tests'):
gc.collect()
garbage = []
for obj in gc.garbage:
# ignore garbage generated by numpy loadtxt command
if hasattr(obj, 'filename') and obj.filename == 'painting30sec.dat':
continue
garbage.append(obj)
    assert not garbage, '%s: %s' % (msg, garbage)
def iterations():
check_garbage()
max_mem = 0
max_mem_stable = 0
max_mem_increasing = 0
leak = True
m1 = 0
for i in range(options.max_iterations):
yield i
if options.debug:
if i == 3:
check_garbage()
helpers.record_memory_leak_status()
if i == 4 or i == 5:
helpers.record_memory_leak_status(print_diff=True)
m2 = mem()
print('iteration %02d/%02d: %d pages used (%+d)' % (
i + 1,
options.max_iterations,
m2,
m2 - m1))
m1 = m2
if m2 > max_mem:
max_mem = m2
max_mem_stable = 0
max_mem_increasing += 1
if max_mem_increasing == options.required:
print('maximum was always increasing for', max_mem_increasing,
'iterations')
break
else:
max_mem_stable += 1
max_mem_increasing = 0
if max_mem_stable == options.required:
print('maximum was stable for', max_mem_stable, 'iterations')
leak = False
break
check_garbage()
if leak:
print('memory leak found')
sys.exit(LEAK_EXIT_CODE)
else:
print('no leak found')
all_tests = {}
def leaktest(f):
"decorator to declare leak test functions"
all_tests[f.__name__] = f
return f
#@leaktest
def provoke_leak():
for i in iterations():
        # note: interestingly this leak only shows up in the later iterations
# (and very small leaks might not be detected)
setattr(gc, 'my_test_leak_%d' % i, np.zeros(50000))
@leaktest
def noleak():
for i in iterations():
setattr(gc, 'my_test_leak', np.zeros(50000))
@leaktest
def document_alloc():
for i in iterations():
doc = document.Document()
doc.cleanup()
@leaktest
def surface_alloc():
for i in iterations():
tiledsurface.Surface()
def paint_doc(doc):
events = painting30sec_events
t_old = events[0][0]
layer = doc.layer_stack.current
for i, (t, x, y, pressure) in enumerate(events):
dtime = t - t_old
t_old = t
layer.stroke_to(doc.brush, x, y, pressure, 0.0, 0.0, dtime)
@leaktest
def save_test():
doc = document.Document()
paint_doc(doc)
for i in iterations():
doc.save('test_leak.ora')
doc.save('test_leak.png')
doc.save('test_leak.jpg')
doc.cleanup()
@leaktest
def repeated_loading():
doc = document.Document()
for i in iterations():
doc.load('bigimage.ora')
doc.cleanup()
@leaktest
def paint_save_clear():
doc = document.Document()
for i in iterations():
paint_doc(doc)
doc.save('test_leak.ora')
doc.clear()
doc.cleanup()
def paint_gui(gui):
"""
Paint with a constant number of frames per recorded second.
Not entirely realistic, but gives good and stable measurements.
"""
FPS = 30
gui_doc = gui.app.doc
model = gui_doc.model
tdw = gui_doc.tdw
b = gui.app.brushmanager.get_brush_by_name('redbrush')
gui.app.brushmanager.select_brush(b)
events = list(painting30sec_events)
t_old = 0.0
t_last_redraw = 0.0
for t, x, y, pressure in events:
if t > t_last_redraw + 1.0/FPS:
gui.wait_for_gui()
t_last_redraw = t
dtime = t - t_old
t_old = t
x, y = tdw.display_to_model(x, y)
gui_doc.modes.top.stroke_to(model, dtime, x, y, pressure, 0.0, 0.0)
@leaktest
def gui_test():
# NOTE: this an all-in-one GUI test as a workaround for the
# problem that the GUI does not cleanly terminate after the test fork()
gui = guicontrol.GUI()
gui.wait_for_idle()
gui.app.filehandler.open_file(u'bigimage.ora')
gui_doc = gui.app.doc
for i in iterations():
gui.app.filehandler.open_file(u'smallimage.ora')
gui.wait_for_idle()
paint_gui(gui)
gui.app.filehandler.save_file(u'test_save.ora')
gui.scroll()
gui_doc.zoom(gui_doc.ZOOM_OUTWARDS)
gui.scroll()
gui_doc.zoom(gui_doc.ZOOM_INWARDS)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO)
from optparse import OptionParser
parser = OptionParser('usage: %prog [options] [test1 test2 test3 ...]')
parser.add_option(
'-a',
'--all',
action='store_true',
default=False,
help='run all tests'
)
parser.add_option(
'-l',
'--list',
action='store_true',
default=False,
help='list all available tests'
)
parser.add_option(
'-d',
'--debug',
action='store_true',
default=False,
help='print leak analysis (slow)'
)
parser.add_option(
'-e',
'--exit',
action='store_true',
default=False,
help='exit at first error'
)
parser.add_option(
'-r',
'--required',
type='int',
default=15,
help='iterations required to draw a conclusion (default: 15)'
)
parser.add_option(
'-m',
'--max-iterations',
type='int',
default=100,
help='maximum number of iterations (default: 100)'
)
options, tests = parser.parse_args()
if options.list:
for name in sorted(all_tests.keys()):
print(name)
sys.exit(0)
if options.required >= options.max_iterations:
print('requiring more good iterations than the iteration limit makes '
'no sense')
sys.exit(1)
if not tests:
if options.all:
tests = list(all_tests)
else:
parser.print_help()
sys.exit(1)
for t in tests:
if t not in all_tests:
print('Unknown test:', t)
sys.exit(1)
results = []
for t in tests:
child_pid = os.fork()
if not child_pid:
print('---')
print('running test "%s"' % t)
print('---')
all_tests[t]()
sys.exit(0)
pid, status = os.wait()
exitcode = os.WEXITSTATUS(status)
if options.exit and exitcode != 0:
sys.exit(1)
results.append(exitcode)
everything_okay = True
print()
print('=== SUMMARY ===')
for t, exitcode in zip(tests, results):
if exitcode == 0:
print(t, 'OK')
else:
everything_okay = False
if exitcode == LEAK_EXIT_CODE:
print(t, 'LEAKING')
else:
print(t, 'EXCEPTION')
if not everything_okay:
sys.exit(1)
| gpl-2.0 | -6,847,488,669,787,236,000 | 24.311688 | 91 | 0.557978 | false |
jtk54/spinnaker | testing/citest/tests/google_http_lb_upsert_scenario.py | 1 | 24264 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test scenario for Gcp Http(s) Load Balancers.
# Standard python modules.
import copy
import json
import time
# citest modules.
import citest.gcp_testing as gcp
import citest.json_predicate as jp
import citest.service_testing as st
from citest.json_contract import ObservationPredicateFactory
ov_factory = ObservationPredicateFactory()
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
SCOPES = [gcp.COMPUTE_READ_WRITE_SCOPE]
GCE_URL_PREFIX = 'https://www.googleapis.com/compute/v1/projects/'
class GoogleHttpLoadBalancerTestScenario(sk.SpinnakerTestScenario):
'''Defines the tests for L7 Load Balancers.
'''
MINIMUM_PROJECT_QUOTA = {
'INSTANCE_TEMPLATES': 1,
'BACKEND_SERVICES': 3,
'URL_MAPS': 1,
'HEALTH_CHECKS': 1,
'IN_USE_ADDRESSES': 2,
'SSL_CERTIFICATES': 2,
'TARGET_HTTP_PROXIES': 1,
'TARGET_HTTPS_PROXIES': 1,
'FORWARDING_RULES': 2
}
MINIMUM_REGION_QUOTA = {
'CPUS': 2,
'IN_USE_ADDRESSES': 2,
'INSTANCE_GROUP_MANAGERS': 1,
'INSTANCES': 2,
}
@classmethod
def new_agent(cls, bindings):
'''Implements citest.service_testing.AgentTestScenario.new_agent.'''
agent = gate.new_agent(bindings)
agent.default_max_wait_secs = 1200
return agent
def __init__(self, bindings, agent=None):
'''Constructor.
Args:
bindings: [dict] The data bindings to use to configure the scenario.
agent: [GateAgent] The agent for invoking the test operations on Gate.
'''
super(GoogleHttpLoadBalancerTestScenario, self).__init__(bindings, agent)
bindings = self.bindings
self.__lb_detail = 'httplb'
self.TEST_APP = bindings['TEST_APP']
self.__lb_name = '{app}-{stack}-{detail}'.format(
app=bindings['TEST_APP'], stack=bindings['TEST_STACK'],
detail=self.__lb_detail)
self.__first_cert = 'first-cert-%s' % (bindings['TEST_APP'])
self.__proto_hc = {
'name': 'basic-' + self.TEST_APP,
'requestPath': '/',
'port': 80,
'checkIntervalSec': 2,
'timeoutSec': 1,
'healthyThreshold': 3,
'unhealthyThreshold': 4
}
self.__proto_delete = {
'type': 'deleteLoadBalancer',
'cloudProvider': 'gce',
'loadBalancerType': 'HTTP',
'loadBalancerName': self.__lb_name,
'region': bindings['TEST_GCE_REGION'],
'regions': [bindings['TEST_GCE_REGION']],
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': '[anonymous]'
}
self.__proto_upsert = {
'cloudProvider': 'gce',
'provider': 'gce',
'stack': bindings['TEST_STACK'],
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'region': bindings['TEST_GCE_REGION'],
'loadBalancerType': 'HTTP',
'loadBalancerName': self.__lb_name,
'urlMapName': self.__lb_name,
'listenersToDelete': [],
'portRange': '80',
'defaultService': {
'name': 'default-' + self.TEST_APP,
'backends': [],
'healthCheck': self.__proto_hc,
},
'certificate': self.__first_cert,
'hostRules': [
{
'hostPatterns': ['host1.com', 'host2.com'],
'pathMatcher': {
'pathRules': [
{
'paths': ['/path', '/path2/more'],
'backendService': {
'name': 'bs-' + self.TEST_APP,
'backends': [],
'healthCheck': self.__proto_hc,
}
}
],
'defaultService': {
'name': 'pm-' + self.TEST_APP,
'backends': [],
'healthCheck': self.__proto_hc,
}
}
}
],
'type': 'upsertLoadBalancer',
'availabilityZones': {bindings['TEST_GCE_REGION']: []},
'user': '[anonymous]'
}
def _get_bs_link(self, bs):
'''Make a fully-formatted backend service link.
'''
return (GCE_URL_PREFIX
+ self.bindings['GOOGLE_PRIMARY_MANAGED_PROJECT_ID']
+ '/global/backendServices/' + bs)
def _get_hc_link(self, hc):
'''Make a fully-formatted health check link.
'''
return (GCE_URL_PREFIX
+ self.bindings['GOOGLE_PRIMARY_MANAGED_PROJECT_ID']
+ '/global/httpHealthChecks/' + hc)
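  # Illustrative note (assumption): for a project named 'my-project' the two
  # helpers above build links of the form
  #   .../compute/v1/projects/my-project/global/backendServices/<name>
  #   .../compute/v1/projects/my-project/global/httpHealthChecks/<name>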
def _set_all_hcs(self, upsert, hc):
'''Set all health checks in upsert to hc.
'''
upsert['defaultService']['healthCheck'] = hc
for host_rule in upsert['hostRules']:
path_matcher = host_rule['pathMatcher']
path_matcher['defaultService']['healthCheck'] = hc
for path_rule in path_matcher['pathRules']:
path_rule['backendService']['healthCheck'] = hc
def _add_contract_clauses(self, contract_builder, upsert):
'''Add the proper predicates to the contract builder for a given
upsert description.
'''
host_rules = upsert['hostRules'] # Host rules will be distinct.
backend_services = [upsert['defaultService']]
for host_rule in host_rules:
path_matcher = host_rule['pathMatcher']
backend_services.append(path_matcher['defaultService'])
for path_rule in path_matcher['pathRules']:
backend_services.append(path_rule['backendService'])
health_checks = [service['healthCheck'] for service in backend_services]
hc_clause_builder = (contract_builder
.new_clause_builder('Health Checks Created',
retryable_for_secs=30)
.list_resource('httpHealthChecks'))
for hc in health_checks:
hc_clause_builder.AND(
ov_factory.value_list_contains(jp.DICT_MATCHES({
'name': jp.STR_EQ(hc['name']),
'requestPath': jp.STR_EQ(hc['requestPath']),
'port': jp.NUM_EQ(hc['port'])})))
bs_clause_builder = (contract_builder.
new_clause_builder('Backend Services Created',
retryable_for_secs=30).
list_resource('backendServices'))
for bs in backend_services:
bs_clause_builder.AND(ov_factory.value_list_contains(jp.DICT_MATCHES({
'name': jp.STR_EQ(bs['name']),
'portName': jp.STR_EQ('http'),
'healthChecks':
jp.LIST_MATCHES([
jp.STR_EQ(self._get_hc_link(bs['healthCheck']['name']))])
})))
url_map_clause_builder = (contract_builder
.new_clause_builder('Url Map Created',
retryable_for_secs=30)
.list_resource('urlMaps'))
for hr in host_rules:
pm = hr['pathMatcher']
path_rules_spec = [
jp.DICT_MATCHES({
'service': jp.STR_EQ(
self._get_bs_link(pr['backendService']['name'])),
'paths':
jp.LIST_MATCHES([jp.STR_EQ(path) for path in pr['paths']])
})
for pr in pm['pathRules']]
path_matchers_spec = {
'defaultService':
jp.STR_EQ(self._get_bs_link(pm['defaultService']['name'])),
'pathRules': jp.LIST_MATCHES(path_rules_spec)
}
url_map_clause_builder.AND(
ov_factory.value_list_contains(jp.DICT_MATCHES({
'name': jp.STR_EQ(self.__lb_name),
'defaultService':
jp.STR_EQ(self._get_bs_link(upsert['defaultService']['name'])),
'hostRules/hosts':
jp.LIST_MATCHES([jp.STR_SUBSTR(host)
for host in hr['hostPatterns']]),
'pathMatchers':
jp.LIST_MATCHES([jp.DICT_MATCHES(path_matchers_spec)]),
})))
port_string = '443-443'
if upsert['certificate'] == '':
port_string = '%s-%s' % (upsert['portRange'], upsert['portRange'])
(contract_builder.new_clause_builder('Forwarding Rule Created',
retryable_for_secs=30)
.list_resource('globalForwardingRules')
.EXPECT(ov_factory.value_list_contains(jp.DICT_MATCHES({
'name': jp.STR_EQ(self.__lb_name),
'portRange': jp.STR_EQ(port_string)
}))))
proxy_clause_builder = contract_builder.new_clause_builder(
'Target Proxy Created', retryable_for_secs=30)
self._add_proxy_clause(upsert['certificate'], proxy_clause_builder)
def _add_proxy_clause(self, certificate, proxy_clause_builder):
target_proxy_name = '%s-target-%s-proxy'
if certificate:
target_proxy_name = target_proxy_name % (self.__lb_name, 'https')
(proxy_clause_builder.list_resource('targetHttpsProxies')
.EXPECT(ov_factory.value_list_path_contains(
'name', jp.STR_EQ(target_proxy_name))))
else:
target_proxy_name = target_proxy_name % (self.__lb_name, 'http')
(proxy_clause_builder.list_resource('targetHttpProxies')
.EXPECT(ov_factory.value_list_path_contains(
'name', jp.STR_EQ(target_proxy_name))))
def upsert_full_load_balancer(self):
'''Upserts L7 LB with full hostRules, pathMatchers, etc.
Calls the upsertLoadBalancer operation with a payload, then verifies that
the expected resources are visible on GCP.
'''
hc = copy.deepcopy(self.__proto_hc)
hc['requestPath'] = '/'
hc['port'] = 80
upsert = copy.deepcopy(self.__proto_upsert)
self._set_all_hcs(upsert, hc)
payload = self.agent.make_json_payload_from_kwargs(
job=[upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, upsert)
return st.OperationContract(
self.new_post_operation(title='upsert full http lb',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def upsert_min_load_balancer(self):
    '''Upserts an L7 LB with the minimum description.
'''
upsert = copy.deepcopy(self.__proto_upsert)
upsert['hostRules'] = []
upsert['certificate'] = '' # Test HTTP upsert, not HTTPS.
payload = self.agent.make_json_payload_from_kwargs(
job=[upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, upsert)
return st.OperationContract(
self.new_post_operation(title='upsert min http lb',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def delete_http_load_balancer(self):
'''Deletes the L7 LB.
'''
bindings = self.bindings
delete = copy.deepcopy(self.__proto_delete)
payload = self.agent.make_json_payload_from_kwargs(
job=[delete],
description='Delete L7 Load Balancer: {0} in {1}:{2}'.format(
self.__lb_name,
bindings['SPINNAKER_GOOGLE_ACCOUNT'],
bindings['TEST_GCE_REGION'],
),
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
(contract_builder.new_clause_builder('Health Check Removed',
retryable_for_secs=30)
.list_resource('httpHealthChecks')
.EXPECT(ov_factory.value_list_path_excludes(
'name', jp.STR_SUBSTR(self.__proto_hc['name'])))
)
(contract_builder.new_clause_builder('Url Map Removed',
retryable_for_secs=30)
.list_resource('urlMaps')
.EXPECT(ov_factory.value_list_path_excludes(
'name', jp.STR_SUBSTR(self.__lb_name)))
)
(contract_builder.new_clause_builder('Forwarding Rule Removed',
retryable_for_secs=30)
.list_resource('globalForwardingRules')
.EXPECT(ov_factory.value_list_path_excludes(
'name', jp.STR_SUBSTR(self.__lb_name)))
)
return st.OperationContract(
self.new_post_operation(
title='delete_http_load_balancer', data=payload, path='tasks'),
contract=contract_builder.build())
def change_health_check(self):
'''Changes the health check associated with the LB.
'''
upsert = copy.deepcopy(self.__proto_upsert)
hc = copy.deepcopy(self.__proto_hc)
hc['requestPath'] = '/changedPath'
hc['port'] = 8080
self._set_all_hcs(upsert, hc)
payload = self.agent.make_json_payload_from_kwargs(
job=[upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, upsert)
return st.OperationContract(
self.new_post_operation(title='change health checks',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def change_backend_service(self):
'''Changes the default backend service associated with the LB.
'''
hc = copy.deepcopy(self.__proto_hc)
bs_upsert = copy.deepcopy(self.__proto_upsert)
hc['name'] = 'updated-' + self.TEST_APP
hc['requestPath'] = '/changedPath1'
hc['port'] = 8080
bs_upsert['defaultService']['healthCheck'] = hc
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title='change backend services',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def add_host_rule(self):
'''Adds a host rule to the url map.
'''
bs_upsert = copy.deepcopy(self.__proto_upsert)
hr = copy.deepcopy(bs_upsert['hostRules'][0])
hr['hostPatterns'] = ['added.host1.com', 'added.host2.com']
hr['pathMatcher']['pathRules'][0]['paths'] = ['/added/path']
bs_upsert['hostRules'].append(hr)
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title='add host rule',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def update_host_rule(self):
'''Updates a host rule to the url map.
'''
bs_upsert = copy.deepcopy(self.__proto_upsert)
hr = copy.deepcopy(bs_upsert['hostRules'][0])
hr['hostPatterns'] = ['updated.host1.com']
hr['pathMatcher']['pathRules'][0]['paths'] = ['/updated/path']
bs_upsert['hostRules'].append(hr)
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title='update host rule',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def add_cert(self, certname, title):
'''Add cert to targetHttpProxy to make it a targetHttpsProxy.
'''
bs_upsert = copy.deepcopy(self.__proto_upsert)
bs_upsert['certificate'] = certname
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title=title,
data=payload, path='tasks'),
contract=contract_builder.build()
)
def add_security_group(self):
'''Associates a security group with the L7 load balancer.
'''
bindings = self.bindings
sec_group_payload = self.agent.make_json_payload_from_kwargs(
job=[
{
'allowed': [
{
'ipProtocol': 'tcp',
'portRanges': ['80-80']
},
{
'ipProtocol': 'tcp',
'portRanges': ['8080-8080']
},
{
'ipProtocol': 'tcp',
'portRanges': ['443-443']
}
],
'backingData': {'networks': ['default']},
'cloudProvider': 'gce',
'application': self.TEST_APP,
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'description': '',
'detail': 'http',
'ipIngress': [
{
'type': 'tcp',
'startPort': 80,
'endPort': 80,
},
{
'type': 'tcp',
'startPort': 8080,
'endPort': 8080,
},
{
'type': 'tcp',
'startPort': 443,
'endPort': 443,
}
],
'name': self.__lb_name + '-rule',
'network': 'default',
'region': 'global',
'securityGroupName': self.__lb_name + '-rule',
'sourceRanges': ['0.0.0.0/0'],
'targetTags': [self.__lb_name + '-tag'],
'type': 'upsertSecurityGroup',
'user': '[anonymous]'
}
],
description='Create a Security Group for L7 operations.',
application=self.TEST_APP
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Security Group Created',
retryable_for_secs=30)
.list_resource('firewalls')
.EXPECT(ov_factory.value_list_path_contains(
'name', jp.STR_SUBSTR(self.__lb_name + '-rule'))))
return st.OperationContract(
self.new_post_operation(title='create security group',
data=sec_group_payload, path='tasks'),
contract=builder.build()
)
def delete_security_group(self):
'''Deletes a security group.
'''
bindings = self.bindings
sec_group_payload = self.agent.make_json_payload_from_kwargs(
job=[
{
'cloudProvider': 'gce',
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'regions': ['global'],
'securityGroupName': self.__lb_name + '-rule',
'type': 'deleteSecurityGroup',
'user': '[anonymous]'
}
],
description='Delete a Security Group.',
application=self.TEST_APP
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Security Group Deleted',
retryable_for_secs=30)
.list_resource('firewalls')
.EXPECT(ov_factory.value_list_path_excludes(
'name', jp.STR_SUBSTR(self.__lb_name + '-rule'))))
return st.OperationContract(
self.new_post_operation(title='delete security group',
data=sec_group_payload, path='tasks'),
contract=builder.build()
)
def add_server_group(self):
'''Adds a server group to the L7 LB.
'''
time.sleep(60) # Wait for the L7 LB to be ready.
bindings = self.bindings
group_name = '{app}-{stack}-v000'.format(app=self.TEST_APP,
stack=bindings['TEST_STACK'])
policy = {
'balancingMode': 'UTILIZATION',
'listeningPort': 80,
'maxUtilization': 0.8,
'capacityScaler': 0.8
}
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'gce',
'application': self.TEST_APP,
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'strategy':'',
'capacity': {'min':1, 'max':1, 'desired':1},
'targetSize': 1,
'image': bindings['TEST_GCE_IMAGE_NAME'],
'zone': bindings['TEST_GCE_ZONE'],
'stack': bindings['TEST_STACK'],
'instanceType': 'f1-micro',
'type': 'createServerGroup',
'tags': [self.__lb_name + '-tag'],
'loadBalancers': [self.__lb_name],
'backendServices': {self.__lb_name: ['bs-' + self.TEST_APP]},
'disableTraffic': False,
'loadBalancingPolicy': {
'balancingMode': 'UTILIZATION',
'listeningPort': 80,
'maxUtilization': 0.8,
'capacityScaler': 0.8
},
'availabilityZones': {
bindings['TEST_GCE_REGION']: [bindings['TEST_GCE_ZONE']]
},
'instanceMetadata': {
'startup-script': ('sudo apt-get update'
' && sudo apt-get install apache2 -y'),
'global-load-balancer-names': self.__lb_name,
'backend-service-names': 'bs-' + self.TEST_APP,
'load-balancing-policy': json.dumps(policy)
},
'account': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'authScopes': ['compute'],
'user': '[anonymous]'
}],
description='Create Server Group in ' + group_name,
application=self.TEST_APP
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Managed Instance Group Added',
retryable_for_secs=30)
.inspect_resource('instanceGroupManagers', group_name)
.EXPECT(ov_factory.value_list_path_contains('targetSize', jp.NUM_EQ(1)))
)
return st.OperationContract(
self.new_post_operation(title='create server group',
data=payload, path='tasks'),
contract=builder.build()
)
def delete_server_group(self):
"""Creates OperationContract for deleteServerGroup.
To verify the operation, we just check that the GCP managed instance group
is no longer visible on GCP (or is in the process of terminating).
"""
bindings = self.bindings
group_name = '{app}-{stack}-v000'.format(
app=self.TEST_APP, stack=bindings['TEST_STACK'])
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'gce',
'serverGroupName': group_name,
'region': bindings['TEST_GCE_REGION'],
'zone': bindings['TEST_GCE_ZONE'],
'type': 'destroyServerGroup',
'regions': [bindings['TEST_GCE_REGION']],
'zones': [bindings['TEST_GCE_ZONE']],
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': '[anonymous]'
}],
application=self.TEST_APP,
description='DestroyServerGroup: ' + group_name
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Managed Instance Group Removed')
.inspect_resource('instanceGroupManagers', group_name)
.EXPECT(
ov_factory.error_list_contains(gcp.HttpErrorPredicate(http_code=404)))
.OR(ov_factory.value_list_path_contains('targetSize', jp.NUM_EQ(0))))
(builder.new_clause_builder('Instances Are Removed',
retryable_for_secs=30)
.list_resource('instances')
.EXPECT(ov_factory.value_list_path_excludes(
'name', jp.STR_SUBSTR(group_name))))
return st.OperationContract(
self.new_post_operation(
title='delete server group', data=payload, path='tasks'),
contract=builder.build()
)
| apache-2.0 | -3,407,985,809,844,732,000 | 33.319661 | 81 | 0.585682 | false |
fzesch/lagesonum | tests/test_input_number.py | 3 | 2465 | # coding: utf-8
# test cases for validation function: success, failure, sanity
from lagesonum.input_number import parse_numbers, is_valid_number
from unittest import TestCase, main
class InputTests(TestCase):
"""Test case for checking whether input in form differing from well formatted line by line input is accepted."""
def test_input_two_different_on_one_line(self):
input_num = "A123 B123"
result = parse_numbers(input_num)
self.assertEqual({"A123", "B123"}, set(result))
def test_input_three_on_three_lines(self):
input_num = "A123\nB123\nC123"
result = parse_numbers(input_num)
self.assertEqual({"A123", "B123", "C123"}, set(result))
def test_input_three_times_three_lines_mixed(self):
input_num = "A123 A234 A345\nB123 B234 B345 \nC123 C234 C345"
result = parse_numbers(input_num)
self.assertEqual({"A123", "B123", "C123", "A234", "B234", "C234", "A345", "B345", "C345"}, set(result))
def test_multiple_delimiters_on_three_lines_mixed(self):
input_num = "A123, A234; A345,\nB123. B234 B345. \nC123 C234 C345"
result = parse_numbers(input_num)
self.assertEqual({"A123", "B123", "C123", "A234", "B234", "C234", "A345", "B345", "C345"}, set(result))
def test_malformed_numbers(self):
input_num = "a123, a234; a345,\nB123. B234 B345. \nC123 C234 C345"
result = parse_numbers(input_num)
self.assertEqual({"A123", "B123", "C123", "A234", "B234", "C234", "A345", "B345", "C345"}, set(result))
def test_fail_numbers(self):
input_num = "A 123, A 234; A345,\nB123. B234 B345. \nC123 C234 C345"
result = parse_numbers(input_num)
self.assertEqual({"B123", "C123", "B234", "C234", "A345", "B345", "C345"}, set(result))
def test_empty_input(self):
input_num = " "
result = parse_numbers(input_num)
self.assertEqual(set(), set(result))
def test_drop_table_parse(self):
input_num = "DROP TABLE NUMBERS"
result = parse_numbers(input_num)
self.assertEqual(set(), set(result))
def test_drop_table_valid(self):
input_num = "DROP TABLE NUMBERS"
result = is_valid_number(input_num)
self.assertEqual(False, result)
def test_valid_pos(self):
input_num = "A1234\nB234"
result = is_valid_number(input_num)
self.assertEqual(True, result)
if __name__ == '__main__':
main() | mit | 4,748,365,251,079,537,000 | 38.142857 | 116 | 0.620284 | false |
ad-m/django-mailbox | django_mailbox/management/commands/processincomingmessage.py | 2 | 1492 | import email
import logging
import sys
try:
from email import utils
except ImportError:
import rfc822 as utils
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
args = "<[Mailbox Name (optional)]>"
help = "Receive incoming mail via stdin"
def add_arguments(self, parser):
parser.add_argument(
'mailbox_name',
nargs='?',
help="The name of the mailbox that will receive the message"
)
def handle(self, mailbox_name=None, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
if mailbox_name:
mailbox = self.get_mailbox_by_name(mailbox_name)
else:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info(
"Message received from %s",
message['from']
)
else:
logger.warning("Message not processable.")
def get_mailbox_by_name(self, name):
mailbox, created = Mailbox.objects.get_or_create(
name=name,
)
return mailbox
def get_mailbox_for_message(self, message):
email_address = utils.parseaddr(message['to'])[1][0:255]
return self.get_mailbox_by_name(email_address)
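# Illustrative usage sketch (assumption, not part of the original module):
# pipe a raw message into the management command, optionally naming the target
# mailbox, e.g.
#
#     ./manage.py processincomingmessage my-mailbox < message.eml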
| mit | -3,416,213,057,997,690,400 | 27.692308 | 72 | 0.61126 | false |
e-q/scipy | scipy/cluster/tests/test_vq.py | 6 | 12417 |
import warnings
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal, assert_,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
ClusterError, _krandinit)
from scipy.cluster import _vq
from scipy.sparse.sputils import matrix
TESTDATA_2D = np.array([
-2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
-2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
-4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
-0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
-2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
-2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
-2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
-2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
-1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
-1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
-0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
-1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
-2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
-0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
-2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
-2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
-1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
-3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
-1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
-2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
-0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
-2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
2.11]).reshape((200, 2))
# Global data
X = np.array([[3.0, 3], [4, 3], [4, 2],
[9, 2], [5, 1], [6, 2], [9, 4],
[5, 2], [5, 4], [7, 4], [6, 5]])
CODET1 = np.array([[3.0000, 3.0000],
[6.2000, 4.0000],
[5.8000, 1.8000]])
CODET2 = np.array([[11.0/3, 8.0/3],
[6.7500, 4.2500],
[6.2500, 1.7500]])
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
class TestWhiten(object):
def test_whiten(self):
desired = np.array([[5.08738849, 2.97091878],
[3.19909255, 0.69660580],
[4.51041982, 0.02640918],
[4.38567074, 0.95120889],
[2.32191480, 1.63195503]])
for tp in np.array, matrix:
obs = tp([[0.98744510, 0.82766775],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_allclose(whiten(obs), desired, rtol=1e-5)
def test_whiten_zero_std(self):
desired = np.array([[0., 1.0, 2.86666544],
[0., 1.0, 1.32460034],
[0., 1.0, 3.74382172]])
for tp in np.array, matrix:
obs = tp([[0., 1., 0.74109533],
[0., 1., 0.34243798],
[0., 1., 0.96785929]])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_allclose(whiten(obs), desired, rtol=1e-5)
assert_equal(len(w), 1)
assert_(issubclass(w[-1].category, RuntimeWarning))
def test_whiten_not_finite(self):
for tp in np.array, matrix:
for bad_value in np.nan, np.inf, -np.inf:
obs = tp([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_raises(ValueError, whiten, obs)
class TestVq(object):
def test_py_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
label1 = py_vq(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
def test_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
label1, dist = _vq.vq(tp(X), tp(initc))
assert_array_equal(label1, LABEL1)
tlabel1, tdist = vq(tp(X), tp(initc))
def test_vq_1d(self):
# Test special rank 1 vq algo, python implementation.
data = X[:, 0]
initc = data[:3]
a, b = _vq.vq(data, initc)
ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
assert_array_equal(a, ta)
assert_array_equal(b, tb)
def test__vq_sametype(self):
a = np.array([1.0, 2.0], dtype=np.float64)
b = a.astype(np.float32)
assert_raises(TypeError, _vq.vq, a, b)
def test__vq_invalid_type(self):
a = np.array([1, 2], dtype=int)
assert_raises(TypeError, _vq.vq, a, a)
def test_vq_large_nfeat(self):
X = np.random.rand(20, 20)
code_book = np.random.rand(3, 20)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
X = X.astype(np.float32)
code_book = code_book.astype(np.float32)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
def test_vq_large_features(self):
X = np.random.rand(10, 5) * 1000000
code_book = np.random.rand(2, 5) * 1000000
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
class TestKMean(object):
def test_large_features(self):
        # Generate a data set with large values, and run kmeans on it
        # (regression test for gh-1077).
d = 300
n = 100
m1 = np.random.randn(d)
m2 = np.random.randn(d)
x = 10000 * np.random.randn(n, d) - 20000 * m1
y = 10000 * np.random.randn(n, d) + 20000 * m2
data = np.empty((x.shape[0] + y.shape[0], d), np.double)
data[:x.shape[0]] = x
data[x.shape[0]:] = y
kmeans(data, 2)
def test_kmeans_simple(self):
np.random.seed(54321)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
code1 = kmeans(tp(X), tp(initc), iter=1)[0]
assert_array_almost_equal(code1, CODET2)
def test_kmeans_lost_cluster(self):
# This will cause kmeans to have a cluster with no points.
data = TESTDATA_2D
initk = np.array([[-1.8127404, -0.67128041],
[2.04621601, 0.07401111],
[-2.31149087, -0.05160469]])
kmeans(data, initk)
with suppress_warnings() as sup:
sup.filter(UserWarning,
"One of the clusters is empty. Re-run kmeans with a "
"different initialization")
kmeans2(data, initk, missing='warn')
assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
def test_kmeans2_simple(self):
np.random.seed(12345678)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
assert_array_almost_equal(code1, CODET1)
assert_array_almost_equal(code2, CODET2)
def test_kmeans2_rank1(self):
data = TESTDATA_2D
data1 = data[:, 0]
initc = data1[:3]
code = initc.copy()
kmeans2(data1, code, iter=1)[0]
kmeans2(data1, code, iter=2)[0]
def test_kmeans2_rank1_2(self):
data = TESTDATA_2D
data1 = data[:, 0]
kmeans2(data1, 2, iter=1)
def test_kmeans2_high_dim(self):
# test kmeans2 when the number of dimensions exceeds the number
# of input points
data = TESTDATA_2D
data = data.reshape((20, 20))[:10]
kmeans2(data, 2)
def test_kmeans2_init(self):
np.random.seed(12345)
data = TESTDATA_2D
kmeans2(data, 3, minit='points')
kmeans2(data[:, :1], 3, minit='points') # special case (1-D)
kmeans2(data, 3, minit='++')
kmeans2(data[:, :1], 3, minit='++') # special case (1-D)
# minit='random' can give warnings, filter those
with suppress_warnings() as sup:
sup.filter(message="One of the clusters is empty. Re-run.")
kmeans2(data, 3, minit='random')
kmeans2(data[:, :1], 3, minit='random') # special case (1-D)
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MemoryError in Wine.')
def test_krandinit(self):
data = TESTDATA_2D
datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]]
k = int(1e6)
for data in datas:
np.random.seed(1234)
init = _krandinit(data, k)
orig_cov = np.cov(data, rowvar=0)
init_cov = np.cov(init, rowvar=0)
assert_allclose(orig_cov, init_cov, atol=1e-2)
def test_kmeans2_empty(self):
# Regression test for gh-1032.
assert_raises(ValueError, kmeans2, [], 2)
def test_kmeans_0k(self):
# Regression test for gh-1073: fail when k arg is 0.
assert_raises(ValueError, kmeans, X, 0)
assert_raises(ValueError, kmeans2, X, 0)
assert_raises(ValueError, kmeans2, X, np.array([]))
def test_kmeans_large_thres(self):
# Regression test for gh-1774
x = np.array([1, 2, 3, 4, 10], dtype=float)
res = kmeans(x, 1, thresh=1e16)
assert_allclose(res[0], np.array([4.]))
assert_allclose(res[1], 2.3999999999999999)
def test_kmeans2_kpp_low_dim(self):
# Regression test for gh-11462
prev_res = np.array([[-1.95266667, 0.898],
[-3.153375, 3.3945]])
np.random.seed(42)
res, _ = kmeans2(TESTDATA_2D, 2, minit='++')
assert_allclose(res, prev_res)
def test_kmeans2_kpp_high_dim(self):
# Regression test for gh-11462
n_dim = 100
size = 10
centers = np.vstack([5 * np.ones(n_dim),
-5 * np.ones(n_dim)])
np.random.seed(42)
data = np.vstack([
np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
])
res, _ = kmeans2(data, 2, minit='++')
assert_array_almost_equal(res, centers, decimal=0)
| bsd-3-clause | 6,523,043,739,359,492,000 | 38.926045 | 80 | 0.502698 | false |
showerst/openstates | openstates/md/bills.py | 1 | 18648 | #!/usr/bin/env python
import datetime
import re
import lxml.html
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
CHAMBERS = {
'upper': ('SB','SJ'),
'lower': ('HB','HJ'),
}
classifiers = {
r'Committee Amendment .+? Adopted': 'amendment:passed',
r'Favorable': 'committee:passed:favorable',
r'First Reading': 'committee:referred',
r'Floor (Committee )?Amendment\s?\(.+?\)$': 'amendment:introduced',
r'Floor Amendment .+? Rejected': 'amendment:failed',
r'Floor (Committee )?Amendment.+?Adopted': 'amendment:passed',
r'Floor Amendment.+? Withdrawn': 'amendment:withdrawn',
r'Pre\-filed': 'bill:introduced',
r'Re\-(referred|assigned)': 'committee:referred',
r'Recommit to Committee': 'committee:referred',
r'Referred': 'committee:referred',
r'Third Reading Passed': 'bill:passed',
r'Third Reading Failed': 'bill:failed',
r'Unfavorable': 'committee:passed:unfavorable',
r'Vetoed': 'governor:vetoed',
r'Approved by the Governor': 'governor:signed',
r'Conference Committee|Passed Enrolled|Special Order|Senate Concur|Motion|Laid Over|Hearing|Committee Amendment|Assigned a chapter|Second Reading|Returned Passed|House Concur|Chair ruled|Senate Refuses to Concur|Senate Requests': 'other',
}
vote_classifiers = {
r'third': 'passage',
r'fla|amend|amd': 'amendment',
}
def _classify_action(action):
if not action:
return None
ctty = None
for regex, type in classifiers.iteritems():
if re.match(regex, action):
if 'committee:referred' in type:
ctty = re.sub(regex, "", action).strip()
return ( type, ctty )
return ( None, ctty )
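# Illustrative behaviour of _classify_action (examples are assumed from the regex
# table above, not taken from real scraper output); the committee name is only
# extracted for 'committee:referred' actions:
#   _classify_action("Favorable Report by Judicial Proceedings")
#       -> ('committee:passed:favorable', None)
#   _classify_action("Re-referred Rules")
#       -> ('committee:referred', 'Rules')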
def _clean_sponsor(name):
if name.startswith('Delegate') or name.startswith('Senator'):
name = name.split(' ', 1)[1]
if ', District' in name:
name = name.rsplit(',', 1)[0]
return name.strip().strip('*')
def _get_td(doc, th_text):
td = doc.xpath('//th[text()="%s"]/following-sibling::td' % th_text)
if td:
return td[0]
td = doc.xpath('//th/span[text()="%s"]/../following-sibling::td' % th_text)
if td:
return td[0]
class MDBillScraper(BillScraper):
jurisdiction = 'md'
def parse_bill_sponsors(self, doc, bill):
sponsor_list = doc.xpath('//a[@name="Sponlst"]')
if sponsor_list:
# more than one bill sponsor exists
elems = sponsor_list[0].xpath('../../..//dd/a')
for elem in elems:
bill.add_sponsor('cosponsor',
_clean_sponsor(elem.text.strip()))
else:
# single bill sponsor
sponsor = doc.xpath('//a[@name="Sponsors"]/../../dd')[0].text_content()
bill.add_sponsor('primary', _clean_sponsor(sponsor))
def parse_bill_actions(self, doc, bill):
for h5 in doc.xpath('//h5'):
if h5.text == 'House Action':
chamber = 'lower'
elif h5.text == 'Senate Action':
chamber = 'upper'
elif h5.text.startswith('Action after passage'):
chamber = 'governor'
else:
break
dts = h5.getnext().xpath('dl/dt')
for dt in dts:
action_date = dt.text.strip()
if action_date and action_date != 'No Action':
year = int(bill['session'][:4])
action_date += ('/%s' % year)
action_date = datetime.datetime.strptime(action_date,
'%m/%d/%Y')
# no actions after June?, decrement the year on these
if action_date.month > 6:
year -= 1
action_date = action_date.replace(year)
# iterate over all dds following the dt
dcursor = dt
while (dcursor.getnext() is not None and
dcursor.getnext().tag == 'dd'):
dcursor = dcursor.getnext()
actions = dcursor.text_content().split('\r\n')
for act in actions:
act = act.strip()
if not act:
continue
atype, committee = _classify_action(act)
kwargs = {
"type": atype
}
if committee is not None:
kwargs['committees'] = committee
if atype:
bill.add_action(chamber, act, action_date,
**kwargs)
else:
self.log('unknown action: %s' % act)
def parse_bill_documents(self, doc, bill):
bill_text_b = doc.xpath('//b[contains(text(), "Bill Text")]')[0]
for sib in bill_text_b.itersiblings():
if sib.tag == 'a':
bill.add_version(sib.text.strip(','), sib.get('href'),
mimetype='application/pdf')
note_b = doc.xpath('//b[contains(text(), "Fiscal and Policy")]')[0]
for sib in note_b.itersiblings():
if sib.tag == 'a' and sib.text == 'Available':
bill.add_document('Fiscal and Policy Note', sib.get('href'))
def parse_bill_votes(self, doc, bill):
params = {
'chamber': None,
'date': None,
'motion': None,
'passed': None,
'yes_count': None,
'no_count': None,
'other_count': None,
}
elems = doc.xpath('//a')
# MD has a habit of listing votes twice
seen_votes = set()
for elem in elems:
href = elem.get('href')
if (href and "votes" in href and href.endswith('htm') and
href not in seen_votes):
seen_votes.add(href)
vote = self.parse_vote_page(href)
vote.add_source(href)
bill.add_vote(vote)
def parse_vote_page(self, vote_url):
vote_html = self.get(vote_url).text
doc = lxml.html.fromstring(vote_html)
# chamber
if 'senate' in vote_url:
chamber = 'upper'
else:
chamber = 'lower'
# date in the following format: Mar 23, 2009
date = doc.xpath('//td[starts-with(text(), "Legislative")]')[0].text
date = date.replace(u'\xa0', ' ')
date = datetime.datetime.strptime(date[18:], '%b %d, %Y')
# motion
motion = ''.join(x.text_content() for x in \
doc.xpath('//td[@colspan="23"]'))
if motion == '':
motion = "No motion given" # XXX: Double check this. See SJ 3.
motion = motion.replace(u'\xa0', ' ')
# totals
tot_class = doc.xpath('//td[contains(text(), "Yeas")]')[0].get('class')
totals = doc.xpath('//td[@class="%s"]/text()' % tot_class)[1:]
yes_count = int(totals[0].split()[-1])
no_count = int(totals[1].split()[-1])
other_count = int(totals[2].split()[-1])
other_count += int(totals[3].split()[-1])
other_count += int(totals[4].split()[-1])
passed = yes_count > no_count
vote = Vote(chamber=chamber, date=date, motion=motion,
yes_count=yes_count, no_count=no_count,
other_count=other_count, passed=passed)
# go through, find Voting Yea/Voting Nay/etc. and next tds are voters
func = None
for td in doc.xpath('//td/text()'):
td = td.replace(u'\xa0', ' ')
if td.startswith('Voting Yea'):
func = vote.yes
elif td.startswith('Voting Nay'):
func = vote.no
elif td.startswith('Not Voting'):
func = vote.other
elif td.startswith('Excused'):
func = vote.other
elif func:
func(td)
return vote
def scrape_bill_2012(self, chamber, session, bill_id, url):
""" Creates a bill object """
if len(session) == 4:
session_url = session+'rs'
else:
session_url = session
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
# find <a name="Title">, get parent dt, get parent dl, then dd n dl
title = doc.xpath('//a[@name="Title"][1]/../../dd[1]/text()')[0].strip()
summary = doc.xpath('//font[@size="3"]/p/text()')[0].strip()
if 'B' in bill_id:
_type = ['bill']
elif 'J' in bill_id:
_type = ['joint resolution']
bill = Bill(session, chamber, bill_id, title, type=_type,
summary=summary)
bill.add_source(url)
self.parse_bill_sponsors(doc, bill) # sponsors
self.parse_bill_actions(doc, bill) # actions
self.parse_bill_documents(doc, bill) # documents and versions
self.parse_bill_votes(doc, bill) # votes
# subjects
subjects = []
for subj in doc.xpath('//a[contains(@href, "/subjects/")]'):
subjects.append(subj.text.split('-see also-')[0])
bill['subjects'] = subjects
# add bill to collection
self.save_bill(bill)
def scrape_vote(self, bill, action_text, url):
doc = lxml.html.fromstring(self.get(url).text)
date = None
yes_count = no_count = other_count = None
# process action_text - might look like "Vote - Senate Floor - Third Reading Passed (46-0) - 01/16/12"
if action_text.startswith('Vote - Senate Floor - '):
action_text = action_text[22:]
chamber = 'upper'
elif action_text.startswith('Vote - House Floor - '):
action_text = action_text[21:]
chamber = 'lower'
motion, unused_date = action_text.rsplit(' - ', 1)
yes_count, no_count = re.findall('\((\d+)-(\d+)\)', motion)[0]
if 'Passed' in motion:
motion = motion.split(' Passed')[0]
passed = True
elif 'Adopted' in motion:
motion = motion.split(' Adopted')[0]
passed = True
elif 'Rejected' in motion:
motion = motion.split(' Rejected')[0]
passed = False
elif 'Failed' in motion:
motion = motion.split(' Failed')[0]
passed = False
elif 'Concur' in motion:
passed = True
elif 'Floor Amendment' in motion:
passed = int(yes_count) > int(no_count)
elif 'overridden' in motion:
passed = True
motion = 'Veto Override'
else:
raise Exception('unknown motion: %s' % motion)
vote = Vote(chamber=chamber, date=None, motion=motion,
yes_count=int(yes_count), no_count=int(no_count),
other_count=0, passed=passed)
vfunc = None
nobrs = doc.xpath('//nobr/text()')
for text in nobrs:
text = text.replace(u'\xa0', ' ')
if text.startswith('Calendar Date: '):
if vote['date']:
self.warning('two dates!, skipping rest of bill')
break
vote['date'] = datetime.datetime.strptime(text.split(': ', 1)[1], '%b %d, %Y %H:%M %p')
elif 'Yeas' in text and 'Nays' in text and 'Not Voting' in text:
yeas, nays, nv, exc, absent = re.match('(\d+) Yeas\s+(\d+) Nays\s+(\d+) Not Voting\s+(\d+) Excused \(Absent\)\s+(\d+) Absent', text).groups()
vote['yes_count'] = int(yeas)
vote['no_count'] = int(nays)
vote['other_count'] = int(nv) + int(exc) + int(absent)
elif 'Voting Yea' in text:
vfunc = vote.yes
elif 'Voting Nay' in text:
vfunc = vote.no
elif 'Not Voting' in text or 'Excused' in text:
vfunc = vote.other
elif vfunc:
if ' and ' in text:
a, b = text.split(' and ')
vfunc(a)
vfunc(b)
else:
vfunc(text)
vote.validate()
vote.add_source(url)
bill.add_vote(vote)
def scrape_bill(self, chamber, session, bill_id, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
title = doc.xpath('//h3[@class="h3billright"]')[0].text_content()
# TODO: grab summary (none present at time of writing)
if 'B' in bill_id:
_type = ['bill']
elif 'J' in bill_id:
_type = ['joint resolution']
else:
raise ValueError('unknown bill type ' + bill_id)
bill = Bill(session, chamber, bill_id, title, type=_type)
bill.add_source(url)
# process sponsors
sponsors = _get_td(doc, 'All Sponsors:').text_content()
sponsors = sponsors.replace('Delegates ', '')
sponsors = sponsors.replace('Delegate ', '')
sponsors = sponsors.replace('Senator ', '')
sponsors = sponsors.replace('Senators ', '')
sponsor_type = 'primary'
for sponsor in re.split(', (?:and )?', sponsors):
sponsor = sponsor.strip()
if not sponsor:
continue
bill.add_sponsor(sponsor_type, sponsor)
sponsor_type = 'cosponsor'
# subjects
subject_list = []
for heading in ('Broad Subject(s):', 'Narrow Subject(s):'):
subjects = _get_td(doc, heading).xpath('a/text()')
subject_list += [s.split(' -see also-')[0] for s in subjects if s]
bill['subjects'] = subject_list
# documents
self.scrape_documents(bill, url.replace('stab=01', 'stab=02'))
# actions
self.scrape_actions(bill, url.replace('stab=01', 'stab=03'))
self.save_bill(bill)
def scrape_documents(self, bill, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
for td in doc.xpath('//table[@class="billdocs"]//td'):
a = td.xpath('a')[0]
if a.text == 'Text':
bill.add_version('Bill Text', a.get('href'),
mimetype='application/pdf')
elif a.text == 'Analysis':
bill.add_document(a.tail.replace(' - ', ' ').strip(),
a.get('href'), mimetype='application/pdf')
elif a.text in ('Bond Bill Fact Sheet',
"Attorney General's Review Letter",
"Governor's Veto Letter",
):
bill.add_document(a.text, a.get('href'),
mimetype='application/pdf')
elif a.text in ('Amendments', 'Conference Committee Amendment',
'Conference Committee Report'):
bill.add_document(a.text + ' - ' + a.tail.strip(),
a.get('href'), mimetype='application/pdf')
elif a.text == 'Vote - Senate - Committee':
bill.add_document('Senate %s Committee Vote' %
a.tail.replace(' - ', ' ').strip(),
a.get('href'), mimetype='application/pdf')
elif a.text == 'Vote - House - Committee':
bill.add_document('House %s Committee Vote' %
a.tail.replace(' - ', ' ').strip(),
a.get('href'), mimetype='application/pdf')
elif a.text == 'Vote - Senate Floor':
self.scrape_vote(bill, td.text_content(), a.get('href'))
elif a.text == 'Vote - House Floor':
self.scrape_vote(bill, td.text_content(), a.get('href'))
else:
raise ValueError('unknown document type: %s', a.text)
def scrape_actions(self, bill, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
for row in doc.xpath('//table[@class="billgrid"]/tr')[1:]:
new_chamber, cal_date, leg_date, action, proceedings = row.xpath('td')
if new_chamber.text == 'Senate':
chamber = 'upper'
elif new_chamber.text == 'House':
chamber = 'lower'
elif new_chamber.text == 'Post Passage':
chamber = 'executive'
elif new_chamber.text is not None:
raise ValueError('unexpected chamber: ' + new_chamber.text)
action = action.text
if cal_date.text:
action_date = datetime.datetime.strptime(cal_date.text, '%m/%d/%Y')
atype, committee = _classify_action(action)
kwargs = { "type": atype }
if committee is not None:
kwargs['committees'] = committee
bill.add_action(chamber, action, action_date, **kwargs)
def scrape(self, chamber, session):
session_slug = session if 's' in session else session + 'rs'
main_page = 'http://mgaleg.maryland.gov/webmga/frmLegislation.aspx?pid=legisnpage&tab=subject3&ys=' + session_slug
chamber_prefix = 'S' if chamber == 'upper' else 'H'
html = self.get(main_page).text
doc = lxml.html.fromstring(html)
ranges = doc.xpath('//table[@class="box1leg"]//td/text()')
for range_text in ranges:
match = re.match('(\w{2})0*(\d+) - \wB0*(\d+)', range_text.strip())
if match:
prefix, begin, end = match.groups()
if prefix[0] == chamber_prefix:
self.debug('scraping %ss %s-%s', prefix, begin, end)
for number in range(int(begin), int(end)+1):
bill_id = prefix + str(number)
url = 'http://mgaleg.maryland.gov/webmga/frmMain.aspx?id=%s&stab=01&pid=billpage&tab=subject3&ys=%s' % (bill_id, session_slug)
if session < '2013':
self.scrape_bill_2012(chamber, session, bill_id, url)
else:
self.scrape_bill(chamber, session, bill_id, url)
| gpl-3.0 | -4,186,206,538,854,430,700 | 38.258947 | 242 | 0.505899 | false |
liqiangnlp/LiNMT | scripts/hint.aware/random.n.words/LiNMT-random-template.py | 1 | 2727 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Qiang Li
# Email: [email protected]
# Time: 13:59, 03/27/2017
import sys
import codecs
import argparse
import random
from io import open
argparse.open = open
reload(sys)
sys.setdefaultencoding('utf8')
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
else:
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Random Template')
parser.add_argument(
'--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
metavar='PATH', help='Input text (default: standard input).')
parser.add_argument(
'--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
metavar='PATH', help='Output file for random template (default: standard output)')
parser.add_argument(
'--log', '-l', type=argparse.FileType('w'), default=sys.stderr,
metavar='PATH', help='Output log file (default: standard error)')
parser.add_argument(
    '--percentage', '-p', type=float, default=0.2, metavar='PERT', help='fraction of words to replace with #Xi symbols. (default: %(default)s)')
return parser
def random_template(fobj, pert, ofobj, oflog):
for line in fobj:
line = line.strip()
v_words = line.split()
# print 'word size {}'.format(len(v_words))
oflog.write('{0}\n'.format(len(v_words)))
output_string = ''
i = 0;
for word in v_words:
# print 'random: {}'.format(random.uniform(0, len(v_words)))
f_i = random.uniform(0, len(v_words))
f_j = f_i / len(v_words)
#print '{0} {1} {2}'.format(f_i, len(v_words), f_j)
if f_j < pert:
i += 1
# print 'f_j = {0}, pert = {1}'.format(f_j, pert)
output_string += ' #X'+str(i)
else:
output_string += ' '+word
output_string = output_string.strip()
ofobj.write('{0}\n'.format(output_string))
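# Illustrative example (assumed input/output; the actual replacement positions
# depend on the random draws): with --percentage 0.4, roughly 40% of the tokens
# in each line are replaced by numbered #Xi placeholders, e.g.
#   input : "the quick brown fox jumps"
#   output: "the #X1 brown #X2 jumps"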
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args()
# read/write files as UTF-8
if args.input.name != '<stdin>':
args.input = codecs.open(args.input.name, encoding='utf-8', errors='ignore')
if args.output.name != '<stdout>':
args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
if args.log.name != '<stderr>':
args.log = codecs.open(args.log.name, 'w', encoding='utf-8')
random_template(args.input, args.percentage, args.output, args.log)
| mit | -3,784,532,567,617,443,000 | 28.967033 | 140 | 0.639531 | false |
erudit/eruditorg | eruditorg/core/subscription/management/commands/import_restrictions.py | 1 | 15791 | import sentry_sdk
import structlog
import datetime as dt
import os.path as op
from django.core.exceptions import ValidationError
from django.core.files import File
from django.core.management.base import BaseCommand
from django.db import transaction
from erudit.models import Journal
from erudit.models import Organisation
from core.accounts.models import LegacyAccountProfile
from core.subscription.models import InstitutionReferer
from core.accounts.shortcuts import get_or_create_legacy_user
from core.subscription.models import InstitutionIPAddressRange
from core.subscription.models import JournalAccessSubscription
from core.subscription.models import JournalAccessSubscriptionPeriod
from core.subscription.restriction.conf import settings as restriction_settings
from core.subscription.restriction.models import (
Abonne,
Ipabonne,
Ipabonneinterval,
Revue,
Revueabonne,
)
class ImportException(Exception):
pass
created_objects = {
"subscription": 0,
"user": 0,
"period": 0,
"iprange": 0,
}
@transaction.atomic
def delete_stale_subscriptions(year: int, logger: structlog.BoundLogger, organisation_id=None):
"""Update stale subscription for the given year
    A stale subscription is a subscription that exists in the eruditorg database
    but does not exist in the restriction database. When an organisation is unsubscribed,
    the corresponding rows in revueabonne are not updated; they are simply deleted.
This function will determine the set of subscriptions present in eruditorg, the
set of subscriptions present in restriction, and diff them to find the set of
subscriptions that are present in eruditorg but not in restriction. It will then
update them to delete all their journals and subscription periods.
:param year: the year for which stale subscriptions should be deleted
:param logger: the logger to use
:param organisation_id: limit deleting stale subscriptions of a specific organisation
"""
# Get all Revueabonne for the given year.
abonneid_for_year = (
Revueabonne.objects.filter(anneeabonnement=year)
.order_by("abonneid")
.values_list("abonneid", flat=True)
.distinct()
)
orgs_with_no_revueabonne = Organisation.objects.exclude(account_id__in=set(abonneid_for_year))
# Get all organisations that have a valid subscription
orgs_with_valid_subscription = Organisation.objects.filter(
pk__in=JournalAccessSubscription.valid_objects.institutional()
.values_list("organisation", flat=True)
.distinct()
)
if organisation_id is not None:
orgs_with_valid_subscription = orgs_with_valid_subscription.filter(
account_id=organisation_id
)
# diff the sets and find the subscribers with no revueabonne
orgs_with_subscription_and_no_revueabonne = orgs_with_valid_subscription.filter(
pk__in=[o.pk for o in orgs_with_no_revueabonne]
)
# get their subscriptions
stale_subscriptions = set(
JournalAccessSubscription.valid_objects.institutional().filter(
organisation__in=orgs_with_subscription_and_no_revueabonne
)
)
# Delete their periods
nowd = dt.datetime.now()
JournalAccessSubscriptionPeriod.objects.filter(
subscription__in=stale_subscriptions,
start__lte=nowd,
end__gte=nowd,
).delete()
for subscription in stale_subscriptions:
logger.info(
"subscription.stale_subscription_deleted",
subscription_pk=subscription.pk,
organisation=subscription.organisation.name,
)
subscription.journals.clear()
subscription.save()
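# Illustrative call (not part of this module; the year and account id below are
# assumptions): clear journals and periods for subscribers that no longer have a
# matching Revueabonne row for that year.
#   delete_stale_subscriptions(2023, structlog.get_logger(__name__), organisation_id=42)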
class DryRun(transaction.Atomic):
def __init__(self, using=None, savepoint=True, dry_run=False):
super().__init__(using, savepoint, durable=False)
self.dry_run = dry_run
def __enter__(self):
super().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
if self.dry_run:
transaction.set_rollback(True)
super().__exit__(exc_type, exc_value, traceback)
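# Illustrative usage of DryRun (assumed example, not part of the original command):
# with dry_run=True the block behaves like a normal atomic transaction but is
# rolled back on exit, so nothing is persisted.
#   with DryRun(dry_run=True):
#       Organisation.objects.get_or_create(account_id=999999)  # discarded on exit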
class Command(BaseCommand):
""" Import restrictions from the restriction database """
help = 'Import data from the "restriction" database'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.created_subscriptions = set()
def add_arguments(self, parser):
parser.add_argument(
"--organisation-id",
action="store",
dest="organisation_id",
help="id of the organisation to import",
)
parser.add_argument(
"--dry-run",
action="store_true",
dest="dry_run",
help="run in dry run mode",
default=False,
)
parser.add_argument(
"--year",
action="store",
dest="year",
help="year to import",
type=int,
default=dt.datetime.now().year,
)
def handle(self, *args, **options):
organisation_id = options.get("organisation_id", None)
year = options.get("year")
dry_run = options.get("dry_run")
logger = structlog.get_logger(__name__)
if dry_run:
logger = logger.bind(dry_run=dry_run)
restriction_subscriptions = Revueabonne.objects.filter(anneeabonnement=year)
if organisation_id:
restriction_subscriptions = restriction_subscriptions.filter(abonneid=organisation_id)
restriction_subscriber_ids = (
restriction_subscriptions.order_by("abonneid")
.values_list("abonneid", flat=True)
.distinct()
)
logger.info(
"import.started", import_type="restrictions", to_process=len(restriction_subscriber_ids)
)
with DryRun(dry_run=dry_run):
for subscriber_id in restriction_subscriber_ids:
# Fetches the subscriber
try:
subscriber = Abonne.objects.get(pk=subscriber_id)
except Abonne.DoesNotExist:
logger.error("Abonne.DoesNotExist", abonne_id=subscriber_id)
raise ImportException
subscription_qs = restriction_subscriptions.filter(abonneid=subscriber_id)
try:
self.import_restriction_subscriber(subscriber, subscription_qs, logger=logger)
except ImportException:
pass
delete_stale_subscriptions(year, logger, organisation_id=organisation_id)
logger.info("import.finished", **created_objects)
@transaction.atomic
def import_restriction_subscriber(
self, restriction_subscriber: Abonne, subscription_qs, logger=None
):
if not logger:
logger = structlog.get_logger(__name__)
logger = logger.bind(subscriber_id=restriction_subscriber.pk)
if not restriction_subscriber.courriel:
logger.warning(
"import_restriction_subscriber", msg="Cannot import subscriber with empty email"
)
return
try:
organisation, created = Organisation.objects.get_or_create(
account_id=restriction_subscriber.pk
)
organisation.name = restriction_subscriber.abonne
organisation.sushi_requester_id = restriction_subscriber.requesterid
organisation.account_id = restriction_subscriber.pk
organisation.save()
if created:
logger.info("organisation.created", pk=organisation.pk, name=organisation.name)
finally:
organisation.save()
# gets or creates the RestrictionProfile instance
# --
try:
restriction_profile = LegacyAccountProfile.objects.filter(
origin=LegacyAccountProfile.DB_RESTRICTION
).get(legacy_id=str(restriction_subscriber.pk))
user = restriction_profile.user
user.email = restriction_subscriber.courriel
user.save()
except LegacyAccountProfile.DoesNotExist:
username = "restriction-{}".format(restriction_subscriber.pk)
user, created = get_or_create_legacy_user(
username=username, email=restriction_subscriber.courriel
)
if created:
created_objects["user"] += 1
logger.info(
"user.created",
pk=user.pk,
username=username,
email=restriction_subscriber.courriel,
)
restriction_profile = LegacyAccountProfile.objects.create(
origin=LegacyAccountProfile.DB_RESTRICTION,
legacy_id=str(restriction_subscriber.pk),
user=user,
organisation=organisation,
)
if restriction_subscriber.icone:
f = open(
op.join(restriction_settings.ABONNE_ICONS_PATH, restriction_subscriber.icone),
"rb",
)
image_file = File(f)
organisation.badge.save(restriction_subscriber.icone, image_file, save=True)
organisation.save()
f.close()
finally:
organisation.members.add(user)
organisation.save()
# Delete all subscriptions for this subscriber!
#
# Why can we do this? Because this import script is the *only* source of subscription
# information. Because of this, we can happily go ahead with a "delete and repopulate"
# approach. If we don't delete our stuff, subscription deletions in Victor won't properly
# be imported: subscription will stay here forever.
# failsafe to ensure that we don't mistakenly delete subscriptions that aren't institutional
if restriction_profile.organisation is None:
raise ValidationError("Organisation is required")
try:
subscription = JournalAccessSubscription.objects.filter(
organisation=restriction_profile.organisation
).get()
subscription.journals.clear()
subscription.journalaccesssubscriptionperiod_set.all().delete()
subscription.referers.all().delete()
subscription.institutionipaddressrange_set.all().delete()
except JournalAccessSubscription.DoesNotExist:
pass
for subscription in subscription_qs.all():
self.import_restriction_subscription(
subscription, restriction_subscriber, restriction_profile, logger=logger
)
def import_restriction_subscription(
self, restriction_subscription, restriction_subscriber, restriction_profile, logger=None
):
if not logger:
logger = structlog.get_logger(__name__)
logger = logger.bind(
subscriber_id=restriction_subscriber.pk, subscription_id=restriction_subscription.pk
)
#
# Fetch the related journal
try:
restriction_journal = Revue.objects.get(revueid=restriction_subscription.revueid)
except Revue.DoesNotExist:
logger.error("Revue.DoesNotExist", revue_id=restriction_subscription.revueid)
return
# gets or creates a JournalAccessSubscription instance
# --
with sentry_sdk.configure_scope() as scope:
scope.fingerprint = ["Journal.DoesNotExist"]
try:
journal_code = restriction_journal.titrerevabr.lower()
journal = Journal.legacy_objects.get_by_id(journal_code)
except Journal.DoesNotExist:
logger.error("Journal.DoesNotExist", titrerevabr=restriction_journal.titrerevabr)
return
subscription, subscription_created = JournalAccessSubscription.objects.get_or_create(
organisation=restriction_profile.organisation
)
logger = logger.bind(subscription_pk=subscription.pk)
if subscription_created:
created_objects["subscription"] += 1
logger.info("subscription.created")
if not subscription.journals.filter(pk=journal.pk):
subscription.journals.add(journal)
logger.info("subscription.add_journal", journal_pk=journal.pk)
# creates the subscription period
# --
if subscription.pk not in self.created_subscriptions:
self.created_subscriptions.add(subscription.pk)
start_date = dt.date(restriction_subscription.anneeabonnement, 1, 1)
end_date = dt.date(restriction_subscription.anneeabonnement + 1, 1, 1)
subscription_period, created = JournalAccessSubscriptionPeriod.objects.get_or_create(
subscription=subscription, start=start_date, end=end_date
)
if created:
created_objects["period"] += 1
logger.info(
"subscriptionperiod.created",
pk=subscription_period.pk,
start=start_date,
end=end_date,
)
try:
subscription_period.clean()
except ValidationError:
# We are saving multiple periods for multiple journals under the same subscription
# instance so period validation errors can happen.
logger.error("subscriptionperiod.validationerror")
raise
else:
subscription_period.save()
# create the subscription referer
# --
if restriction_subscriber.referer:
referer, created = InstitutionReferer.objects.get_or_create(
subscription=subscription, referer=restriction_subscriber.referer
)
if created:
logger.info("referer.created", referer=restriction_subscriber.referer)
# creates the IP whitelist associated with the subscription
# --
restriction_subscriber_ips_set1 = Ipabonne.objects.filter(
abonneid=str(restriction_subscriber.pk)
)
for ip in restriction_subscriber_ips_set1:
ip_start, ip_end = get_ip_range_from_ip(ip.ip)
ip_range, created = InstitutionIPAddressRange.objects.get_or_create(
subscription=subscription, ip_start=ip_start, ip_end=ip_end
)
if created:
created_objects["iprange"] += 1
logger.info("ipabonne.created", ip_start=ip_start, ip_end=ip_end)
restriction_subscriber_ips_ranges = Ipabonneinterval.objects.filter(
abonneid=restriction_subscriber.pk
)
for ip_range in restriction_subscriber_ips_ranges:
ip_start = get_ip(ip_range.debutinterval, repl="0")
ip_end = get_ip(ip_range.fininterval, repl="255")
ip_range, created = InstitutionIPAddressRange.objects.get_or_create(
subscription=subscription, ip_start=ip_start, ip_end=ip_end
)
if created:
created_objects["iprange"] += 1
logger.info("ipabonneinterval.created", ip_start=ip_start, ip_end=ip_end)
def get_ip_range_from_ip(ip):
if "*" not in ip:
return ip, ip
return get_ip(ip, repl="0"), get_ip(ip, repl="255")
def get_ip(ip, repl="0"):
ipl = ip.split(".")
ipl_new = [repl if n == "*" else n for n in ipl]
return ".".join(ipl_new)
| gpl-3.0 | 9,023,072,052,414,946,000 | 36.597619 | 100 | 0.62086 | false |
peterlauri/django | tests/distinct_on_fields/tests.py | 21 | 5863 | from __future__ import unicode_literals
from django.db.models import Max
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import str_prefix
from .models import Celebrity, Fan, Staff, StaffTag, Tag
@skipUnlessDBFeature('can_distinct_on_fields')
@skipUnlessDBFeature('supports_nullable_unique_constraints')
class DistinctOnTests(TestCase):
def setUp(self):
t1 = Tag.objects.create(name='t1')
Tag.objects.create(name='t2', parent=t1)
t3 = Tag.objects.create(name='t3', parent=t1)
Tag.objects.create(name='t4', parent=t3)
Tag.objects.create(name='t5', parent=t3)
self.p1_o1 = Staff.objects.create(id=1, name="p1", organisation="o1")
self.p2_o1 = Staff.objects.create(id=2, name="p2", organisation="o1")
self.p3_o1 = Staff.objects.create(id=3, name="p3", organisation="o1")
self.p1_o2 = Staff.objects.create(id=4, name="p1", organisation="o2")
self.p1_o1.coworkers.add(self.p2_o1, self.p3_o1)
StaffTag.objects.create(staff=self.p1_o1, tag=t1)
StaffTag.objects.create(staff=self.p1_o1, tag=t1)
celeb1 = Celebrity.objects.create(name="c1")
celeb2 = Celebrity.objects.create(name="c2")
self.fan1 = Fan.objects.create(fan_of=celeb1)
self.fan2 = Fan.objects.create(fan_of=celeb1)
self.fan3 = Fan.objects.create(fan_of=celeb2)
def test_basic_distinct_on(self):
"""QuerySet.distinct('field', ...) works"""
# (qset, expected) tuples
qsets = (
(
Staff.objects.distinct().order_by('name'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('name').order_by('name'),
['<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('organisation').order_by('organisation', 'name'),
['<Staff: p1>', '<Staff: p1>'],
),
(
Staff.objects.distinct('name', 'organisation').order_by('name', 'organisation'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Celebrity.objects.filter(fan__in=[self.fan1, self.fan2, self.fan3]).distinct('name').order_by('name'),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
# Does combining querysets work?
(
(Celebrity.objects.filter(fan__in=[self.fan1, self.fan2]).
distinct('name').order_by('name') |
Celebrity.objects.filter(fan__in=[self.fan3]).
distinct('name').order_by('name')),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
(
StaffTag.objects.distinct('staff', 'tag'),
['<StaffTag: t1 -> p1>'],
),
(
Tag.objects.order_by('parent__pk', 'pk').distinct('parent'),
['<Tag: t2>', '<Tag: t4>', '<Tag: t1>'],
),
(
StaffTag.objects.select_related('staff').distinct('staff__name').order_by('staff__name'),
['<StaffTag: t1 -> p1>'],
),
# Fetch the alphabetically first coworker for each worker
(
(Staff.objects.distinct('id').order_by('id', 'coworkers__name').
values_list('id', 'coworkers__name')),
[str_prefix("(1, %(_)s'p2')"), str_prefix("(2, %(_)s'p1')"),
str_prefix("(3, %(_)s'p1')"), "(4, None)"]
),
)
for qset, expected in qsets:
self.assertQuerysetEqual(qset, expected)
self.assertEqual(qset.count(), len(expected))
# Combining queries with different distinct_fields is not allowed.
base_qs = Celebrity.objects.all()
with self.assertRaisesMessage(AssertionError, "Cannot combine queries with different distinct fields."):
base_qs.distinct('id') & base_qs.distinct('name')
# Test join unreffing
c1 = Celebrity.objects.distinct('greatest_fan__id', 'greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(c1.query))
c2 = c1.distinct('pk')
self.assertNotIn('OUTER JOIN', str(c2.query))
def test_distinct_not_implemented_checks(self):
# distinct + annotate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.annotate(Max('id')).distinct('id')[0]
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').annotate(Max('id'))[0]
# However this check is done only when the query executes, so you
# can use distinct() to remove the fields before execution.
Celebrity.objects.distinct('id').annotate(Max('id')).distinct()[0]
# distinct + aggregate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').aggregate(Max('id'))
def test_distinct_on_in_ordered_subquery(self):
qs = Staff.objects.distinct('name').order_by('name', 'id')
qs = Staff.objects.filter(pk__in=qs).order_by('name')
self.assertSequenceEqual(qs, [self.p1_o1, self.p2_o1, self.p3_o1])
qs = Staff.objects.distinct('name').order_by('name', '-id')
qs = Staff.objects.filter(pk__in=qs).order_by('name')
self.assertSequenceEqual(qs, [self.p1_o2, self.p2_o1, self.p3_o1])
def test_distinct_on_get_ordering_preserved(self):
"""
Ordering shouldn't be cleared when distinct on fields are specified.
refs #25081
"""
staff = Staff.objects.distinct('name').order_by('name', '-organisation').get(name='p1')
self.assertEqual(staff.organisation, 'o2')
| bsd-3-clause | -790,203,134,454,279,000 | 44.1 | 118 | 0.563363 | false |
chengstone/FindFaceInVideo | VGGFace/lfw_test_deal.py | 1 | 8246 | # -*- coding: utf-8 -*-
'''
@author: 李华清 (Li Huaqing)
@brief: From the images provided by the LFW site (lfw, lfwcrop_grey, lfwcrop_color) and
        pairs.txt, write out the paths of the two images to be compared (left.txt,
        right.txt) together with the face-match labels (label.txt).
@brief: Optionally perform image resizing and grayscale conversion.
'''
import os
import numpy as np
#import cv2
# Get the subdirectories under a directory
def dir_subdir(dirname):
dirname=str(dirname)
if dirname=="":
return []
if dirname[-1]!="/":
dirname=dirname+"/"
dirlist = os.listdir(dirname)
subdir_list=[];
for x in dirlist:
if os.path.isdir(dirname+x):
subdir_list.append(x)
return subdir_list
# Get the files with a given extension under a directory
def dir_file(dirname,ext):
dirname=str(dirname)
if dirname=="":
return []
if dirname[-1]!="/":
dirname=dirname+"/"
dirlist=os.listdir(dirname)
filelist=[]
for x in dirlist:
if not('.' in x):
continue
if x.split('.')[-1]==ext:
filelist.append(x)
return filelist
# Read LFW's pairs.txt and store the entries in result
# result is a list; each pairs element is a dict with keys flag, img1, img2, num1, num2
def read_paris(filelist="pairs.txt"):
filelist=str(filelist)
fp=open(filelist,'r')
result=[]
for lines in fp.readlines():
lines=lines.replace("\n","").split("\t")
if len(lines)==2:
print "lenth=2:"+str(lines)
continue
elif len(lines)==3:
pairs={
'flag':1,
'img1':lines[0],
'img2':lines[0],
'num1':lines[1],
'num2':lines[2],
}
result.append(pairs)
continue
elif len(lines)==4:
pairs={
'flag':2,
'img1':lines[0],
'num1':lines[1],
'img2':lines[2],
'num2':lines[3],
}
result.append(pairs)
else:
print "read file Error!"
exit()
fp.close
print "Read paris.txt DONE!"
return result
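# pairs.txt line formats consumed above (illustrative names, fields separated by \t):
#   3 fields -> same person       : "George_W_Bush\t10\t24"
#   4 fields -> different persons : "George_W_Bush\t10\tTony_Blair\t1"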
# Find the files with extension ext under a directory, optionally inside subdirectories
def dirdir_list(lfwdir,ext='jpg'):
subdirlist=dir_subdir(lfwdir)
filelist=[]
  # the directory contains subdirectories
if len(subdirlist):
for i in subdirlist:
list=dir_file(lfwdir+i,ext)
for j in list:
j=lfwdir+i+'/'+j
filelist.append(j)
  # the directory contains no subdirectories
else:
list=dir_file(lfwdir,ext)
for line in list:
line=lfwdir+line
filelist.append(line)
return filelist
#def grey_resize(lfwdir,filelist,desdir='lfw_change/',grey=False,resize=False,height=64,width=64):
# if (grey==False and resize==False):
# return []
# #create the destination directory
# if not os.path.exists(desdir):
# os.makedirs(desdir)
# if desdir[-1]!='/':
# desdir=desdir+'/'
# #check whether the directory contains subdirectories
# flag=0
# subdir=dir_subdir(lfwdir)
# if len(subdir):
# flag=1
# #create the output directories
# if flag==1:
# for path in filelist:
# path=path.split('/')
# if not os.path.exists(desdir+path[-2]):
# os.makedirs(desdir+path[-2])
# #paths where the processed images will be stored
# filelistnew=[]
# for path in filelist:
# path=path.split('/')
# if flag==1:
# filelistnew.append(desdir+path[-2]+'/'+path[-1])
# else:
# filelistnew.append(desdir+path[-1])
# #perform grayscale conversion and resizing
# num=0
# nums=len(filelistnew)
# for line in filelist:
# img=cv2.imread(line)
# if grey==True:
# if num%100==0:
# print "grey:"+str(num)+'/'+str(nums)
# img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# if resize==True:
# if num%100==0:
# print "resize:"+str(num)+'/'+str(nums)
# img=cv2.resize(img,(height,width))
# cv2.imwrite(filelistnew[num],img)
# num=num+1
# return filelistnew
# Based on the pairs list, after optional grayscale conversion or resizing, produce left.txt, right.txt and label.txt
def split_pairs(pairslist,lfwdir,ext='jpg'):
num=0
sum=len(pairslist)
  # LFW image layout: images only: 0, folder + images: 1
flag=0
subdir=dir_subdir(lfwdir)
if len(subdir):
flag=1
path_left=""
path_right=""
label=""
#left.txt and right.txt
  # folder + image layout
print "split pairs.txt"
if flag==1:
for lines in pairslist:
num=num+1
if num%100==0:
print str(num)+"/"+str(sum)
dir_left=lfwdir+lines['img1']+'/'
dir_right=lfwdir+lines['img2']+'/'
file_left=lines['img1']+'_'+str("%04d" % int(lines["num1"]))+'.'+ext
file_right=lines['img2']+'_'+str("%04d" % int(lines["num2"]))+'.'+ext
path_left=path_left+dir_left+file_left+"\n"
path_right=path_right+dir_right+file_right+"\n"
  # flat image layout (images only)
else:
for lines in pairslist:
num=num+1
print str(num)+"/"+str(sum)
path_left=path_left+lfwdir+lines['img1']+'_'+str("%04d" % int(lines["num1"]))+'.'+ext+"\n"
path_right=path_right+lfwdir+lines['img2']+'_'+str("%04d" % int(lines["num2"]))+'.'+ext+"\n"
#label.txt
for lines in pairslist:
if int(lines["flag"])==1:
label=label+"0\n"
else:
label=label+"1\n"
result={
'path_left':path_left,
'path_right':path_right,
'label':label}
return result
def testdeal(pairs='pairs.txt',lfwdir='lfw/',ext='jpg',changdir='lfw_chang/',grey=False,resize=False,height=64,width=64):
pairslist=read_paris(pairs)
#if grey==True or resize==True:
# filelist=dirdir_list(lfwdir,ext)
# filelist=grey_resize(lfwdir,filelist,changdir,grey,resize,height,width)
# lfwdir=changdir
pairs_result=split_pairs(pairslist,lfwdir,ext)
return pairs_result
def write_pairs(pairs_result,savepath):
if not os.path.exists(savepath):
os.makedirs(savepath)
left=savepath+'left.txt'
right=savepath+'right.txt'
label=savepath+'label.txt'
fp_left=open(left,'w')
fp_right=open(right,'w')
fp_label=open(label,'w')
fp_left.write(pairs_result['path_left'])
fp_right.write(pairs_result['path_right'])
fp_label.write(pairs_result['label'])
fp_left.close()
fp_right.close()
fp_label.close()
def demo_lfw():
caffe_dir='/home/ikode/caffe-master/'
pairs=caffe_dir+'data/deepID/pairs.txt'
lfwdir=caffe_dir+'data/deepID/lfwcrop_color/faces/'
ext='ppm'
pairs_result=testdeal(pairs,lfwdir,ext)
savepath=caffe_dir+'examples/deepID/lfw_'
write_pairs(pairs_result,savepath)
def grey_pairs():
DEEPID='data/deepID_grey/'
pairs=DEEPID+'pairs.txt'
lfwdir=DEEPID+'lfwcrop_grey/faces/'
ext='pgm'
pairs_result=testdeal(pairs,lfwdir,ext)
return pairs_result
def demo_webface_resize():
DEEPID='/home/ikode/caffe-master/data/deepID/'
pairs=DEEPID+'pairs.txt'
# lfwdir=DEEPID+'lfwcrop_color/faces/'
lfwdir='/media/ikode/Document/big_materials/document/deep_learning/caffe/face_datasets/webface/croped/'
ext='jpg'
lfw_chang='/media/ikode/Document/big_materials/document/deep_learning/caffe/face_datasets/webface/change/'
pairs_result=testdeal(pairs,lfwdir,ext,lfw_chang,False,True)
savepath=caffe_dir+'examples/deepID/webface_'
write_pairs(pairs_result,savepath)
def demo_color_resize():
DEEPID='/home/ikode/caffe-master/data/deepID/'
pairs=DEEPID+'pairs.txt'
# lfwdir=DEEPID+'lfwcrop_color/faces/'
lfwdir='/media/ikode/Document/big_materials/document/deep_learning/caffe/face_datasets/webface/croped/'
ext='jpg'
lfw_chang='/media/ikode/Document/big_materials/document/deep_learning/caffe/face_datasets/webface/change/'
pairs_result=testdeal(pairs,lfwdir,ext,lfw_chang,False,True)
return pairs_result
if __name__=='__main__':
demo_lfw()
| bsd-2-clause | -1,247,507,121,667,997,700 | 26.422535 | 121 | 0.571135 | false |
fridex/fabric8-analytics-worker | f8a_worker/workers/anitya.py | 1 | 9373 | """
Adds project to Anitya, which will keep track of its latest version.
"""
import requests
from f8a_worker.enums import EcosystemBackend
from f8a_worker.errors import TaskError
from f8a_worker.utils import DownstreamMapCache, MavenCoordinates
from f8a_worker.base import BaseTask
# name of "RPM" distro for Anitya
RH_RPM_DISTRO_NAME = 'rh-dist-git'
# name of "Maven" distro for Anitya
RH_MVN_DISTRO_NAME = 'rh-mvn'
RH_MVN_GA_REPO = 'https://maven.repository.redhat.com/ga'
class AnityaTask(BaseTask):
"""Anitya task is responsible for making sure that given project from given ecosystem is
tracked at used Anitya instance. If it is, nothing is done, else it is added there during
the task execution.
In future, we'll likely want to make this task also add upstream/downstream mapping
to given project.
The actual latest_version info is fetched dynamically:
- f8a_worker.models.Analysis.latest_version is a dynamic property that does this
- for the stack-info endpoint, we do this on API level for every component of the stack
"""
_analysis_name = 'anitya'
description = 'Adds project to Anitya to be tracked'
_backend_to_anitya_ecosystem = {
EcosystemBackend.npm: 'npm',
EcosystemBackend.maven: 'maven',
EcosystemBackend.pypi: 'pypi',
EcosystemBackend.rubygems: 'rubygems',
EcosystemBackend.nuget: 'nuget',
EcosystemBackend.scm: 'go'
}
def _get_project_homepage(self, ecosystem, package):
wr = self.parent_task_result('metadata')
# Anitya requires homepage, so we must always return something
# Anitya has uniqueness requirement on project + homepage, so make homepage project-unique
homepage = None
if wr:
homepages =\
[m.get("homepage") for m in wr.get('details', []) if m.get("homepage")]
homepage = homepages[0] if homepages else None
if homepage is not None:
return homepage
else:
return self.configuration.ANITYA_URL + \
'/api/by_ecosystem/{e}/{p}'.format(e=ecosystem, p=package)
def _get_artifact_hash(self, algorithm=None):
wr = self.parent_task_result('digests')
if wr:
for details in wr['details']:
if details.get('artifact'):
return details[algorithm or 'md5']
return None
def _create_anitya_project(self, ecosystem, package, homepage):
eco_model = self.storage.get_ecosystem(ecosystem)
backend = self._backend_to_anitya_ecosystem.get(eco_model.backend, None)
if backend is None:
raise ValueError('Don\'t know how to add ecosystem {e} with backend {b} to Anitya'.
format(e=ecosystem, b=eco_model.backend))
url = self.configuration.ANITYA_URL + '/api/by_ecosystem/' + backend
data = {'ecosystem': backend, 'name': package, 'homepage': homepage, 'check_release': True}
if backend == 'maven':
# for Maven, Anitya needs to know "version_url", which is groupId:artifactId
# which is exactly what we use as package name
data['version_url'] = package
self.log.debug('Creating Anitya project: %s', data)
return requests.post(url, json=data)
def _add_downstream_mapping(self, ecosystem, upstream_project, distribution, package_name):
anitya_url = self.configuration.ANITYA_URL
url = anitya_url + '/api/downstreams/{e}/{p}/'.format(e=ecosystem, p=upstream_project)
downstream_data = {'distro': distribution, 'package_name': package_name}
self.log.debug('Adding Anitya mapping: %s for %s/%s' % (downstream_data,
ecosystem,
upstream_project))
return requests.post(url, json=downstream_data)
def _get_downstream_rpm_pkgs(self, ecosystem, name):
distro, packages, package_names = None, [], ''
md5_hash = self._get_artifact_hash()
eco_obj = self.storage.get_ecosystem(ecosystem)
if eco_obj.is_backed_by(EcosystemBackend.maven):
# for maven, we use 'maven' prefix, the mapping is:
# maven:groupId:artifactId => list,of,rpms
# we use the fact that for maven artifacts, the component name is groupId:artifactId
hashmap = DownstreamMapCache()
downstream_mapping = hashmap[name]
if downstream_mapping is not None:
try:
distro, package_names = downstream_mapping.split()
except ValueError:
self.log.warning("Expecting 2 space-separated values, got '%s'",
downstream_mapping)
else:
self.log.debug('No groupId:artifactId %s found in DB (dist-git)', name)
elif md5_hash:
# Here we assume that the artifact hash matches upstream.
# In case of npm it's true as of npm-2.x.x in Fedora 24, but prior to that npm was
# mangling downloaded tarballs. If that feature returns we probably need to change
# IndianaJones to download artifacts directly.
hashmap = DownstreamMapCache()
downstream_mapping = hashmap[md5_hash]
if downstream_mapping is not None:
try:
distro, _, package_names = downstream_mapping.split()
except ValueError:
self.log.warning("Expecting 3 space-separated values, got '%s'",
downstream_mapping)
else:
self.log.debug('No hash %r found in DB (dist-git)', md5_hash)
else:
self.log.info("Can't map artifact %s (no hash, ecosystem %s)", name, ecosystem)
if package_names:
packages = package_names.split(',')
return distro, packages
def _get_downstream_mvn_pkgs(self, eco, pkg):
packages = []
self.log.info('Searching for {pkg} in maven repo {repo}...'.
format(pkg=pkg, repo=RH_MVN_GA_REPO))
ga = MavenCoordinates.from_str(pkg).to_repo_url(ga_only=True)
result = requests.get('{repo}/{pkg}'.format(repo=RH_MVN_GA_REPO, pkg=ga))
if result.status_code != 200:
self.log.info('Package {pkg} not found in {repo} (status code {code})'.
format(pkg=pkg, repo=RH_MVN_GA_REPO, code=result.status_code))
else:
self.log.info('Found {pkg} in {repo}'.format(pkg=pkg, repo=RH_MVN_GA_REPO))
packages.append(pkg)
return RH_MVN_DISTRO_NAME, packages
def execute(self, arguments):
self._strict_assert(arguments.get('ecosystem'))
self._strict_assert(arguments.get('name'))
eco = arguments['ecosystem']
pkg = arguments['name']
homepage = self._get_project_homepage(eco, pkg)
self.log.info('Registering project {e}/{p} to Anitya'.format(e=eco, p=pkg))
res = self._create_anitya_project(eco, pkg, homepage)
if res.status_code == 200:
self.log.info('Project {e}/{p} had already been registered to Anitya'.
format(e=eco, p=pkg))
elif res.status_code == 201:
self.log.info('Project {e}/{p} was successfully registered to Anitya'.
format(e=eco, p=pkg))
else:
self.log.error('Failed to create Anitya project {e}/{p}. Anitya response: {r}'.
format(e=eco, p=pkg, r=res.text))
return None
# TODO: When we move to a proper workflow manager, we'll want to raise TaskError
# here instead of just logging an error. Right now we don't want a problem
# in AnityaTask to shut down the rest of analysis phases.
# raise TaskError('Failed to create Anitya project {e}/{p}. Anitya response: {r}'.
# format(e=eco, p=pkg, r=res.text))
self.log.info('Project {e}/{p} created successfully'.format(e=eco, p=pkg))
self.log.debug('About to add downstream mapping for %s/%s to Anitya' % (eco, pkg))
distro_pkgs = {}
distro_pkgs.update([self._get_downstream_rpm_pkgs(eco, pkg)])
if self.storage.get_ecosystem(eco).is_backed_by(EcosystemBackend.maven):
distro_pkgs.update([self._get_downstream_mvn_pkgs(eco, pkg)])
for distro, package_names in distro_pkgs.items():
for package_name in package_names:
res = self._add_downstream_mapping(eco, pkg, distro, package_name)
if res.status_code == 200:
self.log.info('Downstream mapping %s/%s has already been added to project %s' %
(distro, package_name, pkg))
elif res.status_code == 201:
self.log.info('Downstream mapping %s/%s was added to project %s' %
(distro, package_name, pkg))
else:
raise TaskError('Failed to add downstream mapping %s/%s to project %s' %
(distro, package_name, pkg))
# we don't want to save any data, so return None
return None
| gpl-3.0 | -5,621,608,549,190,938,000 | 48.331579 | 99 | 0.59266 | false |
adamchainz/django-assets | setup.py | 3 | 2543 | #!/usr/bin/env python
# coding: utf8
from __future__ import with_statement
from setuptools import setup, find_packages
import multiprocessing
# Figure out the version; this could be done by importing the
# module, though that requires dependencies to be already installed,
# which may not be the case when processing a pip requirements
# file, for example.
def parse_version(assignee):
import os, re
here = os.path.dirname(os.path.abspath(__file__))
version_re = re.compile(
r'%s = (\(.*?\))' % assignee)
with open(os.path.join(here, 'django_assets', '__init__.py')) as fp:
for line in fp:
match = version_re.search(line)
if match:
version = eval(match.group(1))
return ".".join(map(str, version))
else:
raise Exception("cannot find version")
version = parse_version('__version__')
webassets_version = parse_version('__webassets_version__')
setup(
name='django-assets',
version=version,
url='http://github.com/miracle2k/django-assets',
license='BSD',
author='Michael Elsdörfer',
author_email='[email protected]',
description='Asset management for Django, to compress and merge '\
'CSS and Javascript files.',
long_description=__doc__,
packages=find_packages(exclude=('tests',)),
zip_safe=False,
platforms='any',
install_requires=[
'Django>=1.7',
'webassets%s' % webassets_version
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='nose.collector',
tests_require=[
'nose',
],
# make plugin available to pytest
entry_points = {
'pytest11': [
'django_assets = django_assets.pytest_plugin',
]
},
)
| bsd-2-clause | 2,344,149,345,523,406,000 | 32.447368 | 72 | 0.597954 | false |
AdaptiveApplications/carnegie | tarc_bus_locator_client/quantities-0.10.1/quantities/units/mass.py | 4 | 3415 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from ..unitquantity import UnitQuantity, UnitMass
from .length import m
kg = kilogram = UnitMass(
'kilogram',
symbol='kg',
aliases=['kilograms']
)
g = gram = UnitMass(
'gram',
kg/1000,
symbol='g',
aliases=['grams']
)
mg = milligram = UnitMass(
'milligram',
gram/1000,
symbol='mg',
aliases=['milligrams']
)
oz = ounce = avoirdupois_ounce = UnitMass(
'ounce',
28.349523125*g,
symbol='oz',
aliases=['ounces','avoirdupois_ounce', 'avoirdupois_ounces'],
doc='exact'
)
lb = pound = avoirdupois_pound = UnitMass(
'pound',
0.45359237*kg,
symbol='lb',
aliases=['pounds', 'avoirdupois_pound', 'avoirdupois_pounds'],
doc='exact'
)
st = stone = UnitMass(
'stone',
14*lb,
symbol='st',
doc='As defined in the UK, 1 stone = 14 avoirdupois pounds'
)
carat = UnitMass(
'carat',
200*mg,
aliases=['carats']
)
gr = grain = UnitMass(
'grain',
64.79891*mg,
symbol='gr',
aliases=['grains']
)
long_hundredweight = UnitMass(
'long_hundredweight',
112*lb,
aliases=['long_hundredweights']
)
short_hundredweight = UnitMass(
'short_hundredweight',
100*lb,
aliases=['short_hundredweights']
) # cwt is used for both short and long hundredweight, so we won't use it
t = metric_ton = tonne = UnitMass(
'tonne',
1000*kg,
symbol='t',
aliases=['tonnes']
)
dwt = pennyweight = UnitMass(
'pennyweight',
24*gr,
symbol='dwt',
aliases=['pennyweights']
)
slug = slugs = UnitMass(
'slug',
14.59390*kg,
aliases=['slugs']
)
toz = troy_ounce = apounce = apothecary_ounce = UnitMass(
'troy_ounce',
480*gr,
symbol='toz',
u_symbol='℥',
aliases=[
'apounce', 'apounces', 'apothecary_ounce', 'apothecary_ounces',
'troy_ounces'
]
)
troy_pound = appound = apothecary_pound = UnitMass(
'troy_pound',
12*toz,
symbol='tlb',
u_symbol='℔',
aliases=[
'troy_pounds', 'appound', 'appounds', 'apothecary_pound',
'apothecary_pounds'
]
)
u = amu = atomic_mass_unit = dalton = Da = UnitMass(
'atomic_mass_unit',
1.660538782e-27*kg,
symbol='u',
aliases=['amu', 'Da', 'dalton'],
doc='relative uncertainty = 5e-8'
)
scruple = UnitMass(
'scruple',
20*gr,
u_symbol='℈',
aliases=['scruples']
)
dr = dram = UnitMass(
'dram',
oz/16,
symbol='dr',
aliases=['drams'],
doc='avoirdupois dram'
)
drachm = apdram = UnitMass(
'drachm',
60*gr,
    u_symbol='ʒ',
aliases=['drachms', 'apdram', 'apdrams'],
doc='also known as the apothecary dram'
)
bag = UnitMass(
'bag',
94*lb,
aliases=['bags']
)
ton = short_ton = UnitMass(
'short_ton',
2000*lb,
aliases=['short_tons']
)
long_ton = UnitMass(
'long_ton', 2240*lb,
aliases=['long_tons']
) # both long and short tons are referred to as "ton", so we won't use it
############################################################
## Mass per unit length ##
############################################################
denier = UnitQuantity(
'denier',
g/(9000*m),
aliases=['deniers']
)
tex = UnitQuantity(
'tex',
g/(1000*m),
aliases=['texs']
)
dtex = UnitQuantity(
'dtex',
g/(10000*m),
aliases=['dtexs']
)
del UnitQuantity, m
| mit | -1,126,116,311,280,413,400 | 19.171598 | 72 | 0.557642 | false |
LavirtheWhiolet/PETOOH | win-compiler/petoohc.py | 2 | 3800 | #!python3
import subprocess, os, sys
VALID_CHARS = "adehkKoOru"
OP_INC, OP_DEC, OP_INCPTR, OP_DECPTR, OP_OUT, OP_JMP, OP_RET = range(7)
COMMANDS = ['Ko', 'kO', 'Kudah', 'kudah', 'Kukarek', 'Kud', 'kud']
def parse(code):
code = filter(VALID_CHARS.__contains__, code)
commands = []
command, buffer = "", ""
for c in code:
buffer = command + c
if command in (COMMANDS[OP_JMP], COMMANDS[OP_RET]) and c != 'a':
commands.append(command)
command = c
elif buffer in COMMANDS and buffer not in (COMMANDS[OP_JMP], COMMANDS[OP_RET]):
commands.append(buffer)
command = ""
else:
command += c
return commands
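# Illustrative example (assumed): parse() tokenises a kokosource string into the
# mnemonics listed in COMMANDS, e.g.
#   parse("KoKoKoKudahkudahKukarek")
#   -> ['Ko', 'Ko', 'Ko', 'Kudah', 'kudah', 'Kukarek']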
def main():
incCount, decCount = 0, 0 #kokoptimisation of KoKokOkO
whileIds = []
whileId = 0
command = ""
inputFileName = sys.argv[1]
# temp *.asm file
tempFileName = inputFileName[0:-5]+".asm"
inputFile = open(inputFileName)
tempFile = open(tempFileName,'w')
def append(s):
tempFile.write(s+'\n')
# exe file head
append('''
format PE console
entry start
include 'win32a.inc'
section '.data' data readable writeable
outv db 0
nll db 0; null terminator for string
section '.data?' data readable writeable
mem rb 65535
section '.code' code readable executable
start:
mov esi, mem
add esi, 32767
''')
for command in parse(inputFile.read()):
if incCount != 0 and command != COMMANDS[OP_INC]:
append('add byte [esi], '+str(incCount))
incCount = 0
elif decCount != 0 and command != COMMANDS[OP_DEC]:
append('sub byte [esi], '+str(decCount))
decCount = 0
if command == COMMANDS[OP_INC]:
incCount += 1
elif command == COMMANDS[OP_DEC]:
decCount += 1
# Input available only in PETOOH Enterprise Edition
#elif command == ',':
# append('ccall [getchar]')
# append('mov byte [esi], al')
elif command == COMMANDS[OP_OUT]:
append('mov ah, byte [esi]')
append('mov [outv], ah')
append('ccall [printf], outv')
elif command == COMMANDS[OP_JMP]:
whileId+=1
whileIds.append(whileId)
s = 'while_kokokopen_'+str(whileId)+''':
cmp byte [esi], 0
je while_kokoklose_'''+str(whileId)
append(s)
elif command == COMMANDS[OP_RET]:
wid = whileIds.pop()
append('jmp while_kokokopen_'+str(wid))
append('while_kokoklose_'+str(wid)+':')
elif command == COMMANDS[OP_DECPTR]:
append('add esi, 1')
elif command == COMMANDS[OP_INCPTR]:
append('sub esi, 1')
# end of exe file
append('''
ccall [getchar]
stdcall [ExitProcess],0
;====================================
section '.idata' import data readable
;====================================
library kernel,'kernel32.dll',\
msvcrt,'msvcrt.dll'
import kernel,\
ExitProcess,'ExitProcess'
import msvcrt,\
printf,'printf',\
getchar,'_getch'
''')
inputFile.close()
tempFile.close()
# put env vars
if len(sys.argv) == 3:
os.putenv('include', sys.argv[2]+"/INCLUDE") # 'D:/bf/FASM/INCLUDE'
os.putenv('PATH', sys.argv[2]+"/") # 'D:/bf/FASM', folder with fasm.exe
try:
subprocess.call(["fasm.exe", tempFileName])
except Exception as e:
print("Can't run fasm.exe")
print(e)
if __name__ == "__main__":
if len(sys.argv) in (2, 3):
main()
else:
print("\n USAGE: petoohc.py /path/to/file.koko [/path/to/fasm]")
| mit | -4,295,954,639,744,526,300 | 24.675676 | 87 | 0.532105 | false |
popazerty/dvbapp2-gui | lib/python/EGAMI/EGAMI_infobar_setup.py | 1 | 7053 | # -*- coding: utf-8 -*-
from Components.ActionMap import *
from Components.config import *
from Components.ConfigList import *
from Components.UsageConfig import *
from Components.Label import Label
from Components.UsageConfig import *
from Screens.Screen import Screen
import os
from EGAMI.EGAMI_skins import EGDecodingSetup_Skin, EGInfoBarSetup_Skin
from Plugins.Extensions.EGAMIPermanentClock.plugin import *
config.EGDecoding = ConfigSubsection()
config.EGDecoding.messageNoResources = ConfigYesNo(default=True)
config.EGDecoding.messageTuneFailed = ConfigYesNo(default=True)
config.EGDecoding.messageNoPAT = ConfigYesNo(default=True)
config.EGDecoding.messageNoPATEntry = ConfigYesNo(default=True)
config.EGDecoding.messageNoPMT = ConfigYesNo(default=True)
config.EGDecoding.dsemudmessages = ConfigYesNo(default=True)
config.EGDecoding.messageYesPmt = ConfigYesNo(default=False)
config.EGDecoding.show_ci_messages = ConfigYesNo(default=False)
class EGDecodingSetup(ConfigListScreen, Screen):
__module__ = __name__
def __init__(self, session, args = 0):
self.skin = EGDecodingSetup_Skin
Screen.__init__(self, session)
list = []
#list.append(getConfigListEntry(__('Enable pmtX.tmp -> X-1..9'), config.EGDecoding.messageYesPmt))
list.append(getConfigListEntry(_('Show Egami informations?'), config.EGDecoding.dsemudmessages))
list.append(getConfigListEntry(_('Show No free tuner info?'), config.EGDecoding.messageNoResources))
list.append(getConfigListEntry(_('Show Tune failed info?'), config.EGDecoding.messageTuneFailed))
list.append(getConfigListEntry(_('Show No data on transponder info?'), config.EGDecoding.messageNoPAT))
list.append(getConfigListEntry(_('Show Service not found info?'), config.EGDecoding.messageNoPATEntry))
list.append(getConfigListEntry(_('Show Service invalid info?'), config.EGDecoding.messageNoPMT))
list.append(getConfigListEntry(_('Show CI Messages?'), config.EGDecoding.show_ci_messages))
self["key_red"] = Label(_("Save"))
self["key_green"] = Label(_("Exit"))
ConfigListScreen.__init__(self, list)
self['actions'] = ActionMap(['OkCancelActions',
'ColorActions'], {'red': self.saveAndExit, 'green' : self.dontSaveAndExit,
'cancel': self.dontSaveAndExit}, -1)
def saveAndExit(self):
if config.EGDecoding.dsemudmessages.value is not False:
os.system("rm -rf /var/etc/.no_osd_messages")
elif config.EGDecoding.dsemudmessages.value is not True:
os.system("touch /var/etc/.no_osd_messages")
if config.EGDecoding.messageYesPmt.value is not False:
os.system("rm -rf /var/etc/.no_pmt_tmp")
elif config.EGDecoding.messageYesPmt.value is not True:
os.system("touch /var/etc/.no_pmt_tmp")
for x in self['config'].list:
x[1].save()
config.EGDecoding.save()
self.close()
def dontSaveAndExit(self):
for x in self['config'].list:
x[1].cancel()
self.close()
config.infobar = ConfigSubsection()
config.infobar.piconEnabled = ConfigYesNo(default=True)
config.infobar.piconType = ConfigSelection(choices={ 'Name': _('Name'), 'Reference': _('Reference')}, default='Reference')
config.infobar.piconDirectory = ConfigSelection(choices={ 'flash': _('/etc/picon/'),
'cf': _('/media/cf/'),
'usb': _('/media/usb/'),
'hdd': _('/media/hdd/')}, default='hdd')
config.infobar.piconDirectoryName = ConfigText(default = "picon", fixed_size = False)
config.infobar.permanentClockPosition = ConfigSelection(choices=["<>"], default="<>")
class EGInfoBarSetup(Screen, ConfigListScreen):
def __init__(self, session):
self.skin = EGInfoBarSetup_Skin
Screen.__init__(self, session)
self.list = []
ConfigListScreen.__init__(self, self.list)
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("Save"))
self["actions"] = ActionMap(["WizardActions", "ColorActions"],
{
"red": self.keyCancel,
"back": self.keyCancel,
"green": self.keySave,
}, -2)
self.list.append(getConfigListEntry(_("Infobar timeout"), config.usage.infobar_timeout))
self.list.append(getConfigListEntry(_("Show permanental clock"), config.plugins.PermanentClock.enabled))
self.list.append(getConfigListEntry(_(' Set clock position'), config.infobar.permanentClockPosition))
self.list.append(getConfigListEntry(_("Show second infobar"), config.usage.show_second_infobar))
self.list.append(getConfigListEntry(_("Show event-progress in channel selection"), config.usage.show_event_progress_in_servicelist))
self.list.append(getConfigListEntry(_("Show channel numbers in channel selection"), config.usage.show_channel_numbers_in_servicelist))
self.list.append(getConfigListEntry(_("Show infobar on channel change"), config.usage.show_infobar_on_zap))
self.list.append(getConfigListEntry(_("Show infobar on skip forward/backward"), config.usage.show_infobar_on_skip))
self.list.append(getConfigListEntry(_("Show infobar on event change"), config.usage.show_infobar_on_event_change))
self.list.append(getConfigListEntry(_("Hide zap errors"), config.usage.hide_zap_errors))
self.list.append(getConfigListEntry(_("Hide CI messages"), config.usage.hide_ci_messages))
self.list.append(getConfigListEntry(_("Show crypto info in infobar"), config.usage.show_cryptoinfo))
self.list.append(getConfigListEntry(_("Swap SNR in db with SNR in percentage on OSD"), config.usage.swap_snr_on_osd))
self.list.append(getConfigListEntry(_("Show EIT now/next in infobar"), config.usage.show_eit_nownext))
self.list.append(getConfigListEntry(_('Use Picon:'), config.infobar.piconEnabled))
#if config.infobar.piconEnabled.value == True:
self.list.append(getConfigListEntry(_(' Picon Type:'), config.infobar.piconType))
self.list.append(getConfigListEntry(_(' Directory:'), config.infobar.piconDirectory))
self.list.append(getConfigListEntry(_(' Directory Name:'), config.infobar.piconDirectoryName))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.handleKeysLeftAndRight()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.handleKeysLeftAndRight()
def handleKeysLeftAndRight(self):
sel = self["config"].getCurrent()[1]
if sel == config.infobar.permanentClockPosition:
pClock.dialog.hide()
self.session.openWithCallback(self.positionerCallback, PermanentClockPositioner)
def positionerCallback(self, callback=None):
pClock.showHide()
def keySave(self):
for x in self["config"].list:
x[1].save()
if pClock.dialog is None:
pClock.gotSession(self.session)
if config.plugins.PermanentClock.enabled.value == True:
pClock.showHide()
if config.plugins.PermanentClock.enabled.value == False:
pClock.showHide()
self.close()
def keyCancel(self):
for x in self["config"].list:
x[1].cancel()
self.close()
| gpl-2.0 | -591,332,294,901,382,100 | 42.006098 | 139 | 0.713597 | false |
cjaymes/pyscap | src/scap/model/oval_5/defs/independent/FileBehaviors.py | 1 | 5665 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
logger = logging.getLogger(__name__)
class FileBehaviors(Model):
MODEL_MAP = {
'attributes': {
'max_depth': {'type': 'Integer', 'default': -1},
# 'max_depth' defines the maximum depth of recursion to perform when
# a recurse_direction is specified. A value of '0' is equivalent to
# no recursion, '1' means to step only one directory level up/down,
# and so on. The default value is '-1' meaning no limitation. For a
# 'max_depth' of -1 or any value of 1 or more the starting directory
# must be considered in the recursive search.
# Note that the default recurse_direction behavior is 'none' so even
# though max_depth specifies no limitation by default, the
# recurse_direction behavior turns recursion off.
# Note that this behavior only applies with the equality operation
# on the path entity.
'recurse': {'enum': ['directories', 'symlinks', 'symlinks and directories'], 'default': 'symlinks and directories'},
# 'recurse' defines how to recurse into the path entity, in other
# words what to follow during recursion. Options include symlinks,
# directories, or both. Note that a max-depth other than 0 has to be
# specified for recursion to take place and for this attribute to
# mean anything. Also note that this behavior does not apply to
# Windows systems since they do not support symbolic links. On
# Windows systems the 'recurse' behavior is always equivalent to
# directories.
# Note that this behavior only applies with the equality operation
# on the path entity.
'recurse_direction': {'enum': ['none', 'up', 'down'], 'default': 'none'},
# 'recurse_direction' defines the direction to recurse, either 'up'
# to parent directories, or 'down' into child directories. The
# default value is 'none' for no recursion.
# Note that this behavior only applies with the equality operation
# on the path entity.
'recurse_file_system': {'enum': ['all', 'local', 'defined'], 'default': 'all'},
# 'recurse_file_system' defines the file system limitation of any
# searching and applies to all operations as specified on the path
# or filepath entity.
# The value of 'local' limits the search scope to local file systems
# (as opposed to file systems mounted from an external system).
# The value of 'defined' keeps any recursion within the file system
# that the file_object (path+filename or filepath) has specified.
# For example, on Windows, if the path specified was "C:\", you
# would search only the C: drive, not other filesystems mounted to
# descendant paths. Similarly, on UNIX, if the path specified was "/",
# you would search only the filesystem mounted there, not other
# filesystems mounted to descendant paths. The value of 'defined'
# only applies when an equality operation is used for searching
# because the path or filepath entity must explicitly define a file
# system.
# The default value is 'all' meaning to search all available file
# systems for data collection.
# Note that in most cases it is recommended that the value of
# 'local' be used to ensure that file system searching is limited to
# only the local file systems. Searching 'all' file systems may have
# performance implications.
'windows_view': {'enum': ['32_bit', '64_bit'], 'default': '64_bit'},
# 64-bit versions of Windows provide an alternate file system and
# registry views to 32-bit applications. This behavior allows the
# OVAL Object to specify which view should be examined. This
# behavior only applies to 64-bit Windows, and must not be applied
# on other platforms.
# Note that the values have the following meaning: '64_bit' –
# Indicates that the 64-bit view on 64-bit Windows operating systems
# must be examined. On a 32-bit system, the Object must be evaluated
# without applying the behavior. '32_bit' – Indicates that the
# 32-bit view must be examined. On a 32-bit system, the Object must
# be evaluated without applying the behavior. It is recommended that
# the corresponding 'windows_view' entity be set on the OVAL Items
# that are collected when this behavior is used to distinguish
# between the OVAL Items that are collected in the 32-bit or 64-bit
# views.
}
}
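# Usage sketch (added, illustrative only -- the element and attribute names
# below are assumptions, not taken from this module): an OVAL file object
# behaviors element such as
#     <behaviors max_depth="2" recurse_direction="down" windows_view="32_bit"/>
# would be decoded through the MODEL_MAP above, with any attribute that is not
# supplied falling back to the listed default (e.g. recurse_file_system='all').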
| gpl-3.0 | -3,407,788,264,260,927,000 | 51.906542 | 128 | 0.6407 | false |
tyb0807/angr | tests/test_stochastic.py | 1 | 1171 | import os
import nose
import angr
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
find = {
'veritesting_a': {
'x86_64': 0x40066a
}
}
criteria = {
'veritesting_a': lambda input_found: input_found.count('B') == 10
}
def run_stochastic(binary, arch):
proj = angr.Project(os.path.join(os.path.join(location, arch), binary),
auto_load_libs=False)
simgr = proj.factory.simgr()
start_state = simgr.active[0]
technique = angr.exploration_techniques.StochasticSearch(start_state)
simgr.use_technique(technique)
def found(simgr):
return simgr.active[0].addr == find[binary][arch]
simgr.run(until=found)
nose.tools.assert_equal(simgr.active[0].addr, find[binary][arch])
input_found = simgr.active[0].posix.dumps(0)
nose.tools.assert_true(criteria[binary](input_found))
def test_stochastic():
for binary in find:
for arch in find[binary]:
yield run_stochastic, binary, arch
if __name__ == "__main__":
for test_func, test_binary, test_arch in test_stochastic():
test_func(test_binary, test_arch)
| bsd-2-clause | 9,043,780,010,512,844,000 | 26.880952 | 97 | 0.643894 | false |
GeoMop/GeoMop | testing/gm_base/polygons/test_aabb_lookup.py | 1 | 2016 | import pytest
import numpy as np
import numpy.linalg as la
from gm_base.polygons.aabb_lookup import *
def test_make_aabb():
points = [ [0,0], [4,1], [0,-4],[2, 5]]
box = make_aabb(points)
assert np.all(box == np.array([0,-4,4,5]))
box = make_aabb(points, margin=0.1)
assert np.all(box == np.array([-0.1, -4.1, 4.1, 5.1]))
def test_intersect_candidates():
al = AABB_Lookup()
box = make_aabb([[-1,-1],[1,1]], margin = 0.1)
def add_box(*pts):
al.add_object(add_box.ibox, make_aabb(pts) )
add_box.ibox += 1
add_box.ibox = 0
add_box([1, 1], [2, 3])
add_box([0, 0], [1, 1])
add_box([1, -1], [2, -1])
add_box([-1,-1], [1,1])
add_box([1, 5], [2, 10])
add_box([1, -5], [2, -10])
candidates = al.intersect_candidates(box)
assert candidates.tolist() == [0,1,2,3]
def min_distance(point, box_list):
min_dist = (np.inf, None)
for i, box in enumerate(box_list):
dist = min( la.norm(box[0:2] - point), la.norm(box[2:4] - point) )
if dist < min_dist[0]:
min_dist = (dist, i)
return min_dist
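# Note (added comment, not in the original tests): min_distance() is the O(n)
# brute-force reference; the parametrized test below checks that
# AABB_Lookup.closest_candidates() prunes boxes while still containing the box
# whose corner is nearest to the query point.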
@pytest.mark.parametrize("seed", list(range(40)))
def test_closest_candidates(seed):
al = AABB_Lookup(init_size=10)
def add_box(*pts):
al.add_object(add_box.ibox, make_aabb(pts) )
add_box.ibox += 1
add_box.ibox = 0
np.random.seed(seed)
size = 1000
boxes1 = 3*np.random.rand(size, 2)
boxes2 = boxes1 + 1.5*(np.random.rand(size, 2) -0.5)
boxes = np.concatenate((boxes1, boxes2), axis=1)
for i, box in enumerate(boxes):
add_box(*box.reshape(2,2))
point = np.array([1., 2.])
ref_min_dist = min_distance(point, boxes)
candidates = al.closest_candidates(point)
print("{} < {}".format(len(candidates), boxes.shape[0]))
assert len(candidates) < boxes.shape[0]
c_boxes = boxes[candidates,:]
min_dist = min_distance(point, c_boxes)
min_dist = (min_dist[0], candidates[min_dist[1]])
assert ref_min_dist == min_dist
| gpl-3.0 | 6,148,171,258,568,407,000 | 27.394366 | 74 | 0.575397 | false |
Joel-U/sparkle | sparkle/gui/stim/component_detail.py | 2 | 5941 |
from sparkle.QtWrapper import QtGui
from sparkle.tools.util import clearLayout
class ComponentsDetailWidget(QtGui.QWidget):
"""class that presents the stimulus doc in a clear and useful way"""
def __init__(self, parent=None):
super(ComponentsDetailWidget, self).__init__(parent)
self.lyt = QtGui.QVBoxLayout()
self.setLayout(self.lyt)
# keeps track of which attributes to display
self.displayTable = {}
self.defaultAttributes = ['intensity', 'risefall']
font = QtGui.QFont()
font.setPointSize(12)
self.setFont(font)
def setDisplayTable(self, table):
"""Sets the table that determines what attributes to display
:param table: keys of stimulus names, and values of a list of attribute names to display
:type table: dict
"""
self.displayTable = table
def setDefaultAttributes(self, defaults):
"""Sets the default attributes to display, if a stimulus type is not in
the display table
:param defaults: names of attributes to show
:type defaults: list<str>
"""
self.defaultAttributes = defaults
def setDoc(self, docs):
"""Sets the documentation to display
:param docs: a list of the stimuli doc, which are dicts
:type docs: list<dict>
"""
# sort stim by start time
docs = sorted(docs, key=lambda k: k['start_s'])
for doc in docs:
stim_type = doc['stim_type']
if not stim_type in self.displayTable:
continue
if not stim_type in self.displayTable[stim_type]:
continue
display_attributes = self.displayTable.get(stim_type, self.defaultAttributes)
self.lyt.addWidget(ComponentDetailFrame(doc, display_attributes))
def clearDoc(self):
"""Clears the widget"""
clearLayout(self.lyt)
class ComponentDetailFrame(QtGui.QFrame):
"""Displays the given *displayAttributes* in a stimulus component's documentation *comp_doc*"""
def __init__(self, comp_doc, displayAttributes):
super(ComponentDetailFrame, self).__init__()
self.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
font = QtGui.QFont()
font.setPointSize(14)
glay = QtGui.QGridLayout()
stim_type = comp_doc['stim_type']
# always at least include stimulus type
title = QtGui.QLabel(stim_type)
title.setFont(font)
glay.addWidget(title,0,0)
# get any other attributes to display, or defaults if not specified
for i, attr in enumerate(displayAttributes):
if attr == stim_type:
continue # already got it
val = comp_doc[attr]
# add to UI
glay.addWidget(QtGui.QLabel(attr),i+1,0)
glay.addWidget(QtGui.QLabel(str(val)),i+1,1)
self.setLayout(glay)
class ComponentsDetailSelector(QtGui.QWidget):
"""Container for ComponentAttributerCheckers"""
def __init__(self, parent=None):
super(ComponentsDetailSelector, self).__init__(parent)
layout = QtGui.QVBoxLayout()
self.setLayout(layout)
def setComponents(self, components):
"""Clears and sets the components contained in this widget
        :param components: list of documentation for subclasses of AbstractStimulusComponent
        :type components: list<dict>
"""
layout = self.layout()
for comp in components:
attrWidget = ComponentAttributerChecker(comp)
layout.addWidget(attrWidget)
def setCheckedDetails(self, checked):
"""Sets which components are checked
:param checked: dictionary of stimtype:list<attribute names> for which components and their attributes should be checked
:type checked: dict
"""
layout = self.layout()
for i in range(layout.count()):
w = layout.itemAt(i).widget()
if w.stimType in checked:
w.setChecked(checked[w.stimType])
def getCheckedDetails(self):
"""Gets the currently checked components and checked attributes
:returns: dict -- of members with stimtype:list<attribute names>
"""
attrs = {}
layout = self.layout()
for i in range(layout.count()):
w = layout.itemAt(i).widget()
attrs[w.stimType] = w.getChecked()
return attrs
class ComponentAttributerChecker(QtGui.QFrame):
"""Allows a user to select attributes from a components's doc"""
def __init__(self, compAttributes):
super(ComponentAttributerChecker, self).__init__()
self.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
layout = QtGui.QGridLayout()
font = QtGui.QFont()
font.setBold(True)
stimType = compAttributes.pop('stim_type')
title = QtGui.QCheckBox(stimType)
title.setFont(font)
layout.addWidget(title,0,0)
for i, key in enumerate(compAttributes):
layout.addWidget(QtGui.QCheckBox(key),i+1,0)
self.setLayout(layout)
self.stimType = stimType
def setChecked(self, tocheck):
"""Sets the attributes *tocheck* as checked
:param tocheck: attributes names to check
:type tocheck: list<str>
"""
layout = self.layout()
for i in range(layout.count()):
w = layout.itemAt(i).widget()
if w.text() in tocheck:
w.setChecked(True)
def getChecked(self):
"""Gets the checked attributes
:returns: list<str> -- checked attribute names
"""
attrs = []
layout = self.layout()
for i in range(layout.count()):
w = layout.itemAt(i).widget()
if w.isChecked():
attrs.append(str(w.text()))
return attrs
| gpl-3.0 | 794,067,393,031,871,900 | 32.755682 | 128 | 0.611682 | false |
ktosiek/spacewalk | backend/server/action_extra_data/solarispkgs.py | 2 | 3853 | #
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# config file-related queuing functions
#
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
# the "exposed" functions
__rhnexport__ = ['install', 'remove', 'patchInstall', 'patchRemove',
'patchClusterInstall', 'patchClusterRemove', 'refresh_list', ]
_query_lookup_action_package = rhnSQL.Statement("""
select ap.id
from rhnActionPackage ap
where ap.action_id = :action_id
and ap.name_id = LOOKUP_PACKAGE_NAME(:name)
and ap.evr_id = LOOKUP_EVR(:epoch, :version, :release)
""")
_query_delete_server_action_package_result = rhnSQL.Statement("""
delete from rhnServerActionPackageResult
where server_id = :server_id
and action_package_id in
(select ap.id
from rhnActionPackage ap
where ap.action_id = :action_id)
""")
_query_insert_server_action_package_result = rhnSQL.Statement("""
insert into rhnServerActionPackageResult
(server_id, action_package_id, result_code, stderr, stdout)
values (:server_id, :action_package_id, :result_code, :stdout_data,
:stderr_data)
""")
def install(server_id, action_id, data={}):
log_debug(1, "Result", data)
# Data is a dict of:
# version = 0
# name = "solarispkgs.install"
# status = [
# [(n, v, r, a), (ret, stdout, stderr)],
# ...
# ]
h = rhnSQL.prepare(_query_lookup_action_package)
key_id = {}
status_data = data.get('status', [])
for k, v in status_data:
params = {
'action_id' : action_id,
'name' : k[0],
'version' : k[1],
'release' : k[2],
'epoch' : None,
}
apply(h.execute, (), params)
row = h.fetchone_dict()
if not row:
log_debug(4, "action_id: %d; server_id: %s; package specified, "
"but not found in rhnActionPackage: %s" % (
action_id, server_id, k))
continue
k = tuple(k)
key_id[k] = (row['id'], v)
# Remove old entries, if present
h = rhnSQL.prepare(_query_delete_server_action_package_result)
h.execute(server_id=server_id, action_id=action_id)
# Insert new entries
h = rhnSQL.prepare(_query_insert_server_action_package_result, blob_map={'stdout_data': 'stdout_data', 'stderr_data': 'stderr_data'} )
for k, (action_package_id, v) in key_id.items():
result_code, stdout_data, stderr_data = v[:3]
if stdout_data:
stdout_data = str(stdout_data or "")
if stderr_data:
stderr_data = str(stderr_data or "")
if not (stdout_data or stderr_data):
# Nothing to do
continue
h.execute(server_id=server_id, action_package_id=action_package_id,
result_code=result_code, stdout_data=stdout_data, stderr_data=stderr_data)
remove = install
patchInstall = install
patchRemove = install
patchClusterInstall = install
patchClusterRemove = install
def refresh_list(server_id, action_id, data={}):
if not data:
return
log_debug("action_extra_data.packages.refresh_list: Should do something "
"useful with this data", server_id, action_id, data)
| gpl-2.0 | -5,224,145,350,613,914,000 | 33.711712 | 138 | 0.626265 | false |
SUPINFOLaboDev/TheSnake | Affichage.py | 1 | 1351 | import pygame
import Jeu
import Snake
class Affichage:
def __init__(self):
self.__fenetre = pygame.display.set_mode((450, 450))
pygame.display.set_caption('Snake')
self.__resolution_x = 640
self.__resolution_y = 480
self.__corps_snake = pygame.image.load("case snake.png")
print('Affichage cree')
self.__serpent = Snake.Snake()
self.__jeu = Jeu.Jeu()
def affichage_jeu(self):
        # display the background
self.affichage_tableau()
self.affichage_snake()
self.affichage_pomme()
self.affichage_score()
def affichage_tableau(self):
        # print('board displayed')
return 0
def affichage_snake(self):
for coord in self.__serpent.get_liste_snake():
self.__fenetre.blit(self.__corps_snake, (coord[0]*30, coord[1]*30))
def affichage_pomme(self):
return 0
def affichage_score(self):
return 0
def effacer_fenetre(self):
self.__fenetre.fill((0, 0, 0))
def get_xres(self):
return self.__resolution_x
def get_yres(self):
return self.__resolution_y
def set_xres(self, valeur):
self.__resolution_x = valeur
def set_yres(self, valeur):
self.__resolution_y = valeur
def get_serpent(self):
return self.__serpent | mit | -3,050,992,350,878,395,000 | 22.719298 | 79 | 0.586973 | false |
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/sqlalchemy/orm/state.py | 33 | 21014 | # orm/state.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from .. import util
from . import exc as orm_exc, interfaces
from .path_registry import PathRegistry
from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \
NO_VALUE, PASSIVE_NO_INITIALIZE, INIT_OK, PASSIVE_OFF
from . import base
class InstanceState(interfaces._InspectionAttr):
"""tracks state information at the instance level.
The :class:`.InstanceState` is a key object used by the
SQLAlchemy ORM in order to track the state of an object;
it is created the moment an object is instantiated, typically
as a result of :term:`instrumentation` which SQLAlchemy applies
to the ``__init__()`` method of the class.
:class:`.InstanceState` is also a semi-public object,
available for runtime inspection as to the state of a
mapped instance, including information such as its current
status within a particular :class:`.Session` and details
about data on individual attributes. The public API
in order to acquire a :class:`.InstanceState` object
is to use the :func:`.inspect` system::
>>> from sqlalchemy import inspect
>>> insp = inspect(some_mapped_object)
.. seealso::
:ref:`core_inspection_toplevel`
"""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = ()
insert_order = None
_strong_obj = None
modified = False
expired = False
deleted = False
_load_pending = False
is_instance = True
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.callables = {}
self.committed_state = {}
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
This object allows inspection of the current data
within an attribute as well as attribute history
since the last flush.
"""
return util.ImmutableProperties(
dict(
(key, AttributeState(self, key))
for key in self.manager
)
)
@property
def transient(self):
"""Return true if the object is :term:`transient`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
not self._attached
@property
def pending(self):
"""Return true if the object is :term:`pending`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
self._attached
@property
def persistent(self):
"""Return true if the object is :term:`persistent`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
self._attached
@property
def detached(self):
"""Return true if the object is :term:`detached`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
not self._attached
@property
@util.dependencies("sqlalchemy.orm.session")
def _attached(self, sessionlib):
return self.session_id is not None and \
self.session_id in sessionlib._sessions
@property
@util.dependencies("sqlalchemy.orm.session")
def session(self, sessionlib):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available."""
return sessionlib._state_session(self)
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is transient or pending
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`.Mapper` used for this mapepd object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent or state.detached``.
"""
return bool(self.key)
def _detach(self):
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
instance_dict = self._instance_dict()
if instance_dict:
instance_dict.discard(self)
self.callables = {}
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
"""Return the instance dict used by the object.
Under normal circumstances, this is always synonymous
with the ``__dict__`` attribute of the mapped object,
unless an alternative instrumentation system has been
configured.
In the case that the actual object has been garbage
collected, this accessor returns a blank dictionary.
"""
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:]
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
manager.dispatch.init_failure(self, args, kwargs)
raise
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {'instance': self.obj()}
state_dict.update(
(k, self.__dict__[k]) for k in (
'committed_state', '_pending_mutations', 'modified',
'expired', 'callables', 'key', 'parents', 'load_options',
'class_',
) if k in self.__dict__
)
if self.load_path:
state_dict['load_path'] = self.load_path.serialize()
state_dict['manager'] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict['instance']
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
# None being possible here generally new as of 0.7.4
# due to storage of state in "parents". "class_"
# also new.
self.obj = None
self.class_ = state_dict['class_']
self.committed_state = state_dict.get('committed_state', {})
self._pending_mutations = state_dict.get('_pending_mutations', {})
self.parents = state_dict.get('parents', {})
self.modified = state_dict.get('modified', False)
self.expired = state_dict.get('expired', False)
self.callables = state_dict.get('callables', {})
self.__dict__.update([
(k, state_dict[k]) for k in (
'key', 'load_options',
) if k in state_dict
])
if 'load_path' in state_dict:
self.load_path = PathRegistry.\
deserialize(state_dict['load_path'])
state_dict['manager'](self, inst, state_dict)
def _initialize(self, key):
"""Set this attribute to an empty value or collection,
based on the AttributeImpl in use."""
self.manager.get_impl(key).initialize(self, self.dict)
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
old = dict_.pop(key, None)
if old is not None and self.manager[key].impl.collection:
self.manager[key].impl._invalidate_collection(old)
self.callables.pop(key, None)
def _expire_attribute_pre_commit(self, dict_, key):
"""a fast expire that can be called by column loaders during a load.
The additional bookkeeping is finished up in commit_all().
Should only be called for scalar attributes.
This method is actually called a lot with joined-table
loading, when the second table isn't present in the result.
"""
dict_.pop(key, None)
self.callables[key] = self
@classmethod
def _row_processor(cls, manager, fn, key):
impl = manager[key].impl
if impl.collection:
def _set_callable(state, dict_, row):
old = dict_.pop(key, None)
if old is not None:
impl._invalidate_collection(old)
state.callables[key] = fn
else:
def _set_callable(state, dict_, row):
state.callables[key] = fn
return _set_callable
def _expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.modified = False
self._strong_obj = None
self.committed_state.clear()
InstanceState._pending_mutations._reset(self)
# clear out 'parents' collection. not
# entirely clear how we can best determine
# which to remove, or not.
InstanceState.parents._reset(self)
for key in self.manager:
impl = self.manager[key].impl
if impl.accepts_scalar_loader and \
(impl.expire_missing or key in dict_):
self.callables[key] = self
old = dict_.pop(key, None)
if impl.collection and old is not None:
impl._invalidate_collection(old)
self.manager.dispatch.expire(self, None)
def _expire_attributes(self, dict_, attribute_names):
pending = self.__dict__.get('_pending_mutations', None)
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
self.callables[key] = self
old = dict_.pop(key, None)
if impl.collection and old is not None:
impl._invalidate_collection(old)
self.committed_state.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def __call__(self, state, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if not passive & SQL_OK:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.\
intersection(self.unmodified)
self.manager.deferred_scalar_loader(self, toload)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
for k in toload.intersection(self.callables):
del self.callables[k]
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return set(keys).intersection(self.manager).\
difference(self.committed_state)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return set(self.manager).\
difference(self.committed_state).\
difference(self.dict)
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
@property
def expired_attributes(self):
"""Return the set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs.
"""
return set([k for k, v in self.callables.items() if v is self])
def _instance_dict(self):
return None
def _modified_event(
self, dict_, attr, previous, collection=False, force=False):
if not attr.send_modified_events:
return
if attr.key not in self.committed_state or force:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
# assert self._strong_obj is None or self.modified
if (self.session_id and self._strong_obj is None) \
or not self.modified:
instance_dict = self._instance_dict()
if instance_dict:
instance_dict._modified.add(self)
# only create _strong_obj link if attached
# to a session
inst = self.obj()
if self.session_id:
self._strong_obj = inst
if inst is None:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (
self.manager[attr.key],
base.state_class_str(self)
))
self.modified = True
def _commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
for key in set(self.callables).\
intersection(keys).\
intersection(dict_):
del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers/callables for attributes loaded are removed.
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter, instance_dict=None):
"""Mass version of commit_all()."""
for state, dict_ in iter:
state.committed_state.clear()
InstanceState._pending_mutations._reset(state)
callables = state.callables
for key in list(callables):
if key in dict_ and callables[key] is state:
del callables[key]
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
"""Provide an inspection interface corresponding
to a particular attribute on a particular mapped object.
The :class:`.AttributeState` object is accessed
via the :attr:`.InstanceState.attrs` collection
of a particular :class:`.InstanceState`::
from sqlalchemy import inspect
insp = inspect(some_mapped_object)
attr_state = insp.attrs.some_attribute
"""
def __init__(self, state, key):
self.state = state
self.key = key
@property
def loaded_value(self):
"""The current value of this attribute as loaded from the database.
If the value has not been loaded, or is otherwise not present
in the object's dictionary, returns NO_VALUE.
"""
return self.state.dict.get(self.key, NO_VALUE)
@property
def value(self):
"""Return the value of this attribute.
This operation is equivalent to accessing the object's
attribute directly or via ``getattr()``, and will fire
off any pending loader callables if needed.
"""
return self.state.manager[self.key].__get__(
self.state.obj(), self.state.class_)
@property
def history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method will **not** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
:func:`.attributes.get_history` - underlying function
"""
return self.state.get_history(self.key,
PASSIVE_NO_INITIALIZE)
def load_history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method **will** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:attr:`.AttributeState.history`
:func:`.attributes.get_history` - underlying function
.. versionadded:: 0.9.0
"""
return self.state.get_history(self.key,
PASSIVE_OFF ^ INIT_OK)
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
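# Added illustrative sketch (not part of the original module): operations on a
# PendingCollection cancel out per object rather than being replayed verbatim:
#     pc = PendingCollection()
#     pc.append(a); pc.remove(b)   # a recorded as added, b as deleted
#     pc.remove(a)                 # a is simply dropped from added_items,
#                                  # not recorded as deleted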
| gpl-2.0 | 347,556,891,364,119,700 | 29.812317 | 77 | 0.59289 | false |
newren/git-multimail | setup.py | 2 | 1403 | #! /usr/bin/env python2
import sys
import os
from setuptools import setup
assert 0x02040000 <= sys.hexversion < 0x03000000, \
"Install Python 2, version 2.4 or greater"
def read_version():
sys.path.insert(0, os.path.join('git-multimail'))
import git_multimail
return git_multimail.__version__
def read_readme():
return open(os.path.join('git-multimail', 'README')).read()
setup(
name='git-multimail',
version=read_version(),
description='Send notification emails for Git pushes',
long_description=read_readme(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Communications :: Email',
'Topic :: Software Development :: Version Control',
],
keywords='git hook email',
url='https://github.com/git-multimail/git-multimail',
author='Michael Haggerty',
author_email='[email protected]',
maintainer='Matthieu Moy',
maintainer_email='[email protected]',
license='GPLv2',
package_dir={'': 'git-multimail'},
py_modules=['git_multimail'],
)
| gpl-2.0 | -8,852,782,307,343,172,000 | 31.627907 | 75 | 0.647897 | false |
recklessromeo/otm-core | opentreemap/api/tests.py | 2 | 69747 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from StringIO import StringIO
from json import loads, dumps
from urlparse import urlparse
import urllib
import os
import json
import base64
import datetime
from unittest.case import skip
from django.contrib.auth.models import AnonymousUser
from django.contrib.gis.geos import Point
from django.test.utils import override_settings
from django.test.client import Client, RequestFactory, ClientHandler
from django.http import HttpRequest
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.files import File
from treemap.lib.udf import udf_create
from treemap.models import Species, Plot, Tree, User, FieldPermission
from treemap.instance import create_stewardship_udfs
from treemap.audit import ReputationMetric, Audit
from treemap.tests import (make_user, make_request, set_invisible_permissions,
make_instance, LocalMediaTestCase, media_dir,
make_commander_user)
from treemap.tests.base import OTMTestCase
from exporter.tests import UserExportsTestCase
from api.test_utils import setupTreemapEnv, mkPlot, mkTree
from api.models import APIAccessCredential
from api.views import (add_photo_endpoint, update_profile_photo_endpoint,
instance_info_endpoint)
from api.instance import (instances_closest_to_point, instance_info,
public_instances)
from api.user import create_user
from api.auth import get_signature_for_request
from api.decorators import (check_signature, SIG_TIMESTAMP_FORMAT,
API_VERSIONS)
LATEST_API = str(max(API_VERSIONS))
API_PFX = "/api/v%s" % LATEST_API
def sign_request_as_user(request, user):
try:
cred = APIAccessCredential.objects.get(user=user)
except APIAccessCredential.DoesNotExist:
cred = APIAccessCredential.create(user=user)
return sign_request(request, cred)
def sign_request(request, cred=None):
if cred is None:
cred = APIAccessCredential.create()
nowstr = datetime.datetime.now().strftime(SIG_TIMESTAMP_FORMAT)
reqdict = dict(request.REQUEST.iteritems())
reqdict['timestamp'] = nowstr
reqdict['access_key'] = cred.access_key
request.REQUEST.dicts = [reqdict]
sig = get_signature_for_request(request, cred.secret_key)
request.REQUEST.dicts[0]['signature'] = sig
return request
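# Note (added comment, not in the original tests): this mirrors what a signing
# client is expected to do -- add 'timestamp' and 'access_key' to the request
# parameters, compute a request signature with the credential's secret_key via
# get_signature_for_request(), and attach the result as 'signature'; the
# check_signature decorator on the API side is then expected to recompute and
# compare it.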
def _get_path(parsed_url):
"""
Taken from a class method in the Django test client
"""
# If there are parameters, add them
if parsed_url[3]:
return urllib.unquote(parsed_url[2] + ";" + parsed_url[3])
else:
return urllib.unquote(parsed_url[2])
def send_json_body(url, body_object, client, method, user=None):
"""
Serialize a list or dictionary to JSON then send it to an endpoint.
The "post" method exposed by the Django test client assumes that you
are posting form data, so you need to manually setup the parameters
to override that default functionality.
"""
body_string = dumps(body_object)
body_stream = StringIO(body_string)
parsed_url = urlparse(url)
client_params = {
'CONTENT_LENGTH': len(body_string),
'CONTENT_TYPE': 'application/json',
'PATH_INFO': _get_path(parsed_url),
'QUERY_STRING': parsed_url[4],
'REQUEST_METHOD': method,
'wsgi.input': body_stream,
}
return _send_with_client_params(url, client, client_params, user)
class SignedClientHandler(ClientHandler):
def __init__(self, sign, sign_as, *args, **kwargs):
self.sign = sign
self.sign_as = sign_as
super(SignedClientHandler, self).__init__(*args, **kwargs)
def get_response(self, req):
if self.sign:
req = sign_request_as_user(req, self.sign_as)
return super(SignedClientHandler, self).get_response(req)
def get_signed(client, *args, **kwargs):
handler = client.handler
client.handler = SignedClientHandler(True, kwargs.get('user', None))
resp = client.get(*args, **kwargs)
client.handler = handler
return resp
def _send_with_client_params(url, client, client_params, user=None):
handler = client.handler
client.handler = SignedClientHandler(True, user)
resp = client.post(url, **client_params)
client.handler = handler
return resp
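# Note (added comment, not in the original tests): get_signed() and
# _send_with_client_params() use the same pattern -- temporarily replace the
# Django test client's handler with a SignedClientHandler so the outgoing
# request is signed as the given user, then restore the original handler.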
def post_json(url, body_object, client, user=None):
"""
Serialize a list or dictionary to JSON then POST it to an endpoint.
The "post" method exposed by the Django test client assumes that you
are posting form data, so you need to manually setup the parameters
to override that default functionality.
"""
return send_json_body(url, body_object, client, 'POST', user)
def put_json(url, body_object, client, user=None):
return send_json_body(url, body_object, client, 'PUT', user)
def assert_reputation(test_case, expected_reputation):
"""
'test_case' object should have attributes 'user' and 'instance'
Tests whether user's reputation is as expected.
Reloads user object from database since reputation may have changed.
"""
user = User.objects.get(pk=test_case.user.id)
reputation = user.get_reputation(test_case.instance)
test_case.assertEqual(expected_reputation, reputation,
'Reputation is %s but %s was expected'
% (reputation, expected_reputation))
class Version(OTMTestCase):
def setUp(self):
setupTreemapEnv()
self.u = User.objects.get(username="jim")
def test_version(self):
settings.OTM_VERSION = "1.2.3"
settings.API_VERSION = "2"
ret = get_signed(self.client, "%s/version" % API_PFX)
self.assertEqual(ret.status_code, 200)
content = loads(ret.content)
self.assertEqual(content["otm_version"], settings.OTM_VERSION)
self.assertEqual(content["api_version"], settings.API_VERSION)
class PlotListing(OTMTestCase):
def setUp(self):
self.instance = setupTreemapEnv()
self.u = User.objects.get(username="commander")
self.client = Client()
def test_edits(self):
#TODO: Test recent edits
return None
user = self.u
        get_signed(self.client, "%s/user/%s/edits" %
(API_PFX, user.pk))
def setup_edit_flags_test(self):
ghost = AnonymousUser()
self.ghost = ghost
peon = make_user(username="peon", password='pw')
peon.save_with_user(self.u)
duke = make_user(username="duke", password='pw')
duke.save_with_user(self.u)
leroi = make_user(username="leroi", password='pw')
leroi.active = True
leroi.save_with_user(self.u)
p_peon_0 = mkPlot(self.instance, self.u)
p_peon_1 = mkPlot(self.instance, self.u)
p_duke_2 = mkPlot(self.instance, self.u)
t_duke_0 = mkTree(self.instance, self.u, plot=p_peon_0)
t_peon_1 = mkTree(self.instance, self.u, plot=p_peon_1)
t_duke_2 = mkTree(self.instance, self.u, plot=p_duke_2)
p_roi_3 = mkPlot(self.instance, self.u)
t_roi_3 = mkTree(self.instance, self.u, plot=p_roi_3)
self.plots = [p_peon_0, p_peon_1, p_duke_2, p_roi_3]
self.trees = [t_duke_0, t_peon_1, t_duke_2, t_roi_3]
self.users = [ghost, peon, duke, leroi]
def mkd(self, e, d):
return {"can_delete": d, "can_edit": e}
def mkdp(self, pe, pd, te=None, td=None):
d = {"plot": self.mkd(pe, pd)}
if td is not None and te is not None:
d["tree"] = self.mkd(te, td)
return d
@skip("wait until this api is real")
def test_basic_data(self):
p = mkPlot(self.instance, self.u)
p.width = 22
p.length = 44
p.geom = Point(55, 56)
p.readonly = False
p.save_with_user(self.u)
info = self.client.get("%s/instance/%s/plots" %
(API_PFX, self.instance.url_name))
self.assertEqual(info.status_code, 200)
content = loads(info.content)
self.assertEqual(len(content), 1)
record = content[0]
self.assertEqual(record["id"], p.pk)
self.assertEqual(record["plot_width"], 22)
self.assertEqual(record["plot_length"], 44)
self.assertEqual(record["readonly"], False)
self.assertEqual(record["geom"]["srid"], 3857)
self.assertEqual(record["geom"]["x"], 55)
self.assertEqual(record["geom"]["y"], 56)
self.assertEqual(record.get("tree"), None)
@skip("wait for endpoint to be done")
def test_tree_data(self):
p = mkPlot(self.u)
t = mkTree(self.u, plot=p)
t.species = None
t.dbh = None
t.present = True
t.save()
info = self.client.get("%s/plots" % API_PFX)
self.assertEqual(info.status_code, 200)
content = loads(info.content)
self.assertEqual(content(json), 1)
record = content[0]
self.assertEqual(record["tree"]["id"], t.pk)
t.species = Species.objects.all()[0]
t.dbh = 11.2
t.save()
info = self.client.get("%s/plots" % API_PFX)
self.assertEqual(info.status_code, 200)
content = loads(info.content)
self.assertEqual(len(content), 1)
record = content[0]
self.assertEqual(record["tree"]["species"], t.species.pk)
self.assertEqual(record["tree"]["dbh"], t.dbh)
self.assertEqual(record["tree"]["id"], t.pk)
@skip("wait for endpoint to be done")
def test_paging(self):
p0 = mkPlot(self.u)
p0.present = False
p0.save()
p1 = mkPlot(self.u)
p2 = mkPlot(self.u)
p3 = mkPlot(self.u)
r = self.client.get("%s/plots?offset=0&size=2" % API_PFX)
rids = set([p["id"] for p in loads(r.content)])
self.assertEqual(rids, set([p1.pk, p2.pk]))
r = self.client.get("%s/plots?offset=1&size=2" % API_PFX)
rids = set([p["id"] for p in loads(r.content)])
self.assertEqual(rids, set([p2.pk, p3.pk]))
r = self.client.get("%s/plots?offset=2&size=2" % API_PFX)
rids = set([p["id"] for p in loads(r.content)])
self.assertEqual(rids, set([p3.pk]))
r = self.client.get("%s/plots?offset=3&size=2" % API_PFX)
rids = set([p["id"] for p in loads(r.content)])
self.assertEqual(rids, set())
r = self.client.get("%s/plots?offset=0&size=5" % API_PFX)
rids = set([p["id"] for p in loads(r.content)])
self.assertEqual(rids, set([p1.pk, p2.pk, p3.pk]))
class Locations(OTMTestCase):
def setUp(self):
self.instance = setupTreemapEnv()
self.user = User.objects.get(username="commander")
def test_locations_plots_endpoint_with_auth(self):
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots" % (API_PFX,
self.instance.url_name),
user=self.user)
self.assertEqual(response.status_code, 200)
def test_locations_plots_endpoint(self):
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots" % (API_PFX,
self.instance.url_name))
self.assertEqual(response.status_code, 200)
def test_locations_plots_endpoint_max_plots_param_must_be_a_number(self):
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots?max_plots=foo" % (
API_PFX, self.instance.url_name))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'The max_plots parameter must be '
'a number between 1 and 500')
def test_locations_plots_max_plots_param_cannot_be_greater_than_500(self):
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots?max_plots=501" % (
API_PFX, self.instance.url_name))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'The max_plots parameter must be '
'a number between 1 and 500')
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots?max_plots=500" %
(API_PFX, self.instance.url_name))
self.assertEqual(response.status_code, 200)
def test_locations_plots_endpoint_max_plots_param_cannot_be_less_than_1(
self):
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots?max_plots=0" %
(API_PFX, self.instance.url_name))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'The max_plots parameter must be a '
'number between 1 and 500')
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots?max_plots=1" %
(API_PFX, self.instance.url_name))
self.assertEqual(response.status_code, 200)
def test_locations_plots_endpoint_distance_param_must_be_a_number(self):
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots?distance=foo" %
(API_PFX, self.instance.url_name))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'The distance parameter must be a number')
response = get_signed(
self.client,
"%s/instance/%s/locations/0,0/plots?distance=42" %
(API_PFX, self.instance.url_name))
self.assertEqual(response.status_code, 200)
def test_plots(self):
plot = mkPlot(self.instance, self.user)
plot.save_with_user(self.user)
response = get_signed(
self.client,
"%s/instance/%s/locations/%s,%s/plots" %
(API_PFX, self.instance.url_name,
plot.geom.x, plot.geom.y))
self.assertEqual(response.status_code, 200)
class CreatePlotAndTree(OTMTestCase):
def setUp(self):
self.instance = setupTreemapEnv()
self.user = User.objects.get(username="commander")
rm = ReputationMetric(instance=self.instance, model_name='Plot',
action=Audit.Type.Insert, direct_write_score=2,
approval_score=20, denial_score=5)
rm.save()
def test_create_plot_with_tree(self):
data = {
"plot":
{'geom': {"y": 25,
"x": 35,
"srid": 3857}},
"tree": {
"height": 10.0
}}
###TODO: Need to create reputation metrics
plot_count = Plot.objects.count()
reputation_count = self.user.get_reputation(self.instance)
response = post_json("%s/instance/%s/plots" % (API_PFX,
self.instance.url_name),
data, self.client, self.user)
self.assertEqual(200, response.status_code,
"Create failed:" + response.content)
# Assert that a plot was added
self.assertEqual(plot_count + 1, Plot.objects.count())
# Assert that reputation went up
assert_reputation(self, reputation_count + 6)
response_json = loads(response.content)
self.assertTrue("id" in response_json['plot'])
id = response_json['plot']["id"]
plot = Plot.objects.get(pk=id)
self.assertEqual(35.0, plot.geom.x)
self.assertEqual(25.0, plot.geom.y)
tree = plot.current_tree()
self.assertIsNotNone(tree)
self.assertEqual(10.0, tree.height)
@skip("this validation should be in the main app")
def test_create_plot_with_invalid_tree_returns_400(self):
data = {
"plot":
{'geom': {"y": 35,
"x": 25,
"srid": 4326}},
"tree": {
"height": 1000000
}}
tree_count = Tree.objects.count()
reputation_count = self.user.get_reputation(self.instance)
response = post_json("%s/instance/%s/plots" % (API_PFX,
self.instance.url_name),
data, self.client, self.user)
self.assertEqual(400,
response.status_code,
"Expected creating a million foot "
"tall tree to return 400:" + response.content)
body_dict = loads(response.content)
self.assertTrue('error' in body_dict,
"Expected the body JSON to contain an 'error' key")
errors = body_dict['error']
self.assertTrue(len(errors) == 1,
"Expected a single error message to be returned")
self.assertEqual('Height is too large.', errors[0])
# Assert that a tree was _not_ added
self.assertEqual(tree_count, Tree.objects.count())
# Assert that reputation was _not_ added
assert_reputation(self, reputation_count)
def test_create_plot_with_geometry(self):
data = {
"plot": {
"geom": {
"x": 35,
"y": 25,
"srid": 3857
},
},
"tree": {
"height": 10
}}
plot_count = Plot.objects.count()
reputation_count = self.user.get_reputation(self.instance)
response = post_json("%s/instance/%s/plots" % (API_PFX,
self.instance.url_name),
data, self.client, self.user)
self.assertEqual(200, response.status_code,
"Create failed:" + response.content)
# Assert that a plot was added
self.assertEqual(plot_count + 1, Plot.objects.count())
# Assert that reputation was added
assert_reputation(self, reputation_count + 6)
response_json = loads(response.content)
self.assertTrue("id" in response_json['plot'])
id = response_json['plot']["id"]
plot = Plot.objects.get(pk=id)
self.assertEqual(35.0, plot.geom.x)
self.assertEqual(25.0, plot.geom.y)
tree = plot.current_tree()
self.assertIsNotNone(tree)
self.assertEqual(10.0, tree.height)
class UpdatePlotAndTree(OTMTestCase):
def setUp(self):
self.instance = setupTreemapEnv()
self.user = User.objects.get(username="commander")
self.public_user = User.objects.get(username="apprentice")
rm = ReputationMetric(instance=self.instance, model_name='Plot',
action=Audit.Type.Update, direct_write_score=2,
approval_score=5, denial_score=1)
rm.save()
def test_invalid_plot_id_returns_404_and_a_json_error(self):
response = put_json("%s/instance/%s/plots/0" %
(API_PFX, self.instance.url_name),
{}, self.client, self.user)
self.assertEqual(404, response.status_code)
def test_update_plot(self):
test_plot = mkPlot(self.instance, self.user)
test_plot.width = 1
test_plot.length = 2
test_plot.geocoded_address = 'foo'
test_plot.save_with_user(self.user)
self.assertEqual(0, test_plot.geom.x)
self.assertEqual(0, test_plot.geom.y)
self.assertEqual(1, test_plot.width)
self.assertEqual(2, test_plot.length)
reputation_count = self.user.get_reputation(self.instance)
updated_values = {'plot':
{'geom':
{'y': 0.001, 'x': 0.001, 'srid': 4326},
'width': 11,
'length': 22}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
response_json = loads(response.content)
self.assertAlmostEqual(0.001, response_json['plot']['geom']['y'])
self.assertAlmostEqual(0.001, response_json['plot']['geom']['x'])
self.assertEqual(11, response_json['plot']['width'])
self.assertEqual(22, response_json['plot']['length'])
assert_reputation(self, reputation_count + 6)
@skip("ignore pending")
def test_update_plot_with_pending(self):
test_plot = mkPlot(self.instance, self.user)
test_plot.width = 1
test_plot.length = 2
test_plot.save_with_user(self.user)
self.assertEqual(50, test_plot.geom.x)
self.assertEqual(50, test_plot.geom.y)
self.assertEqual(1, test_plot.width)
self.assertEqual(2, test_plot.length)
self.assertEqual(0, len(Audit.pending_audits()),
"Expected the test to start with no pending records")
reputation_count = self.user.get_reputation(self.instance)
updated_values = {'geometry':
{'lat': 70, 'lon': 60},
'plot_width': 11,
'plot_length': 22}
# Send the edit request as a public user
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.public_user)
self.assertEqual(200, response.status_code)
# Assert that nothing has changed.
# Pends should have been created instead
response_json = loads(response.content)
self.assertEqual(50, response_json['geom']['y'])
self.assertEqual(50, response_json['geom']['x'])
self.assertEqual(1, response_json['plot_width'])
self.assertEqual(2, response_json['plot_length'])
assert_reputation(self, reputation_count)
self.assertEqual(3, len(Audit.pending_audits()),
"Expected 3 pends, one for each edited field")
self.assertEqual(3, len(response_json['pending_edits'].keys()),
"Expected the json response to have a "
"pending_edits dict with 3 keys, one for each field")
def test_invalid_field_returns_200_field_is_not_in_response(self):
test_plot = mkPlot(self.instance, self.user)
updated_values = {'foo': 'bar'}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
response_json = loads(response.content)
self.assertFalse("error" in response_json.keys(),
"Did not expect an error")
self.assertFalse("foo" in response_json.keys(),
"Did not expect foo to be added to the plot")
def test_update_creates_tree(self):
test_plot = mkPlot(self.instance, self.user)
test_plot_id = test_plot.id
self.assertIsNone(test_plot.current_tree())
updated_values = {'tree': {'diameter': 1.2}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
tree = Plot.objects.get(pk=test_plot_id).current_tree()
self.assertIsNotNone(tree)
self.assertEqual(1.2, tree.diameter)
# TODO: Waiting for issue to be fixed
# https://github.com/azavea/OTM2/issues/82
# def test_update_creates_tree_with_pending(self):
# test_plot = mkPlot(self.instance, self.user)
# test_plot_id = test_plot.id
# self.assertIsNone(test_plot.current_tree())
# self.assertEqual(0, len(Audit.pending_audits()),
# "Expected the test to start with no pending records")
# updated_values = {'tree': {'diameter': 1.2}}
# response = put_json("%s/instance/%s/plots/%d" %
# (API_PFX, self.instance.url_name, test_plot.pk),
# updated_values, self.client, self.public_user)
# self.assertEqual(200, response.status_code)
# self.assertEqual(0, len(Pending.objects.all()),
# "Expected a new tree to be created, "
# "rather than creating pends")
# tree = Plot.objects.get(pk=test_plot_id).current_tree()
# self.assertIsNotNone(tree)
# self.assertEqual(1.2, tree.dbh)
def test_update_tree(self):
test_plot = mkPlot(self.instance, self.user)
test_tree = mkTree(self.instance, self.user, plot=test_plot)
test_tree_id = test_tree.id
test_tree.diameter = 2.3
test_tree.save_with_user(self.user)
updated_values = {'tree': {'diameter': 3.9}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.id),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
tree = Tree.objects.get(pk=test_tree_id)
self.assertIsNotNone(tree)
self.assertEqual(3.9, tree.diameter)
@skip("ignore pending")
def test_update_tree_with_pending(self):
test_plot = mkPlot(self.instance, self.user)
test_tree = mkTree(self.instance, self.user, plot=test_plot)
test_tree_id = test_tree.pk
test_tree.diameter = 2.3
test_tree.save_with_user(self.user)
self.assertEqual(0, len(Audit.pending_audits()),
"Expected the test to start with no pending records")
updated_values = {'tree': {'diameter': 3.9}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.public_user)
self.assertEqual(200, response.status_code)
tree = Tree.objects.get(pk=test_tree_id)
self.assertIsNotNone(tree)
self.assertEqual(2.3, tree.diameter,
"A pend should have been created instead"
" of editing the tree value.")
self.assertEqual(1, len(Audit.pending_audits()),
"Expected 1 pend record for the edited field.")
response_json = loads(response.content)
self.assertEqual(1, len(response_json['pending_edits'].keys()),
"Expected the json response to have a"
" pending_edits dict with 1 keys")
def test_update_tree_species(self):
test_plot = mkPlot(self.instance, self.user)
test_tree = mkTree(self.instance, self.user, plot=test_plot)
test_tree_id = test_tree.id
first_species = Species.objects.all()[0]
updated_values = {'tree': {'species': {'id': first_species.id}}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
tree = Tree.objects.get(pk=test_tree_id)
self.assertIsNotNone(tree)
self.assertEqual(first_species, tree.species)
def test_update_tree_returns_404_on_invalid_species_id(self):
test_plot = mkPlot(self.instance, self.user)
mkTree(self.instance, self.user, plot=test_plot)
invalid_species_id = -1
self.assertRaises(Exception,
Species.objects.get, pk=invalid_species_id)
updated_values = {'tree': {'species': {'id': invalid_species_id}}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(404, response.status_code)
def test_approve_pending_edit_returns_404_for_invalid_pend_id(self):
invalid_pend_id = -1
self.assertRaises(Exception, Audit.objects.get, pk=invalid_pend_id)
url = "%s/instance/%s/pending-edits/%d/approve/" % (
API_PFX, self.instance.url_name, invalid_pend_id)
response = post_json(url, None, self.client, self.user)
self.assertEqual(404, response.status_code,
"Expected approving and invalid "
"pend id to return 404")
def test_reject_pending_edit_returns_404_for_invalid_pend_id(self):
invalid_pend_id = -1
self.assertRaises(Exception, Audit.objects.get, pk=invalid_pend_id)
url = "%s/instance/%s/pending-edits/%d/reject/" % (
API_PFX, self.instance.url_name, invalid_pend_id)
response = post_json(url, None, self.client, self.user)
self.assertEqual(404, response.status_code,
"Expected approving and invalid pend "
" id to return 404")
@skip("waiting for pending integration")
def test_approve_pending_edit(self):
self.assert_pending_edit_operation(Audit.Type.PendingApprove)
@skip("waiting for pending integration")
def test_reject_pending_edit(self):
self.assert_pending_edit_operation(Audit.Type.PendingReject)
def assert_pending_edit_operation(self, action,
original_dbh=2.3, edited_dbh=3.9):
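        # Shared helper: the public user edits the tree diameter, which
        # should create a pending audit instead of a direct change; `action`
        # then approves or rejects that pend and the resulting tree/JSON
        # values are checked against the expected dbh.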
test_plot = mkPlot(self.instance, self.user)
test_tree = mkTree(self.instance, self.user, plot=test_plot)
test_tree_id = test_tree.id
test_tree.diameter = original_dbh
test_tree.save_with_user(self.user)
self.assertEqual(0, len(Audit.pending_audits()),
"Expected the test to start with no pending records")
updated_values = {'tree': {'diameter': edited_dbh}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.id),
updated_values, self.client,
self.public_user)
self.assertEqual(200, response.status_code)
tree = Tree.objects.get(pk=test_tree_id)
self.assertIsNotNone(tree)
self.assertEqual(original_dbh, tree.diameter,
"A pend should have been created instead"
" of editing the tree value.")
self.assertEqual(1, len(Audit.pending_audits()),
"Expected 1 pend record for the edited field.")
pending_edit = Audit.pending_audits()[0]
self.assertEqual(None, pending_edit.ref,
"Expected that the audit has not been applied")
if action == Audit.Type.PendingApprove:
action_str = 'approve'
else:
action_str = 'reject'
response = post_json("%s/instance/%s/pending-edits/%d/%s/" %
(API_PFX, self.instance.url_name,
pending_edit.id, action_str),
None, self.client, self.user)
self.assertEqual(200, response.status_code)
pending_edit = Audit.objects.get(pk=pending_edit.id)
self.assertIsNotNone(pending_edit.ref,
"Expected the audit to be marked as processed")
pending_edit_marked = pending_edit.ref
self.assertEqual(pending_edit_marked.action,
action,
"Expected the type of the audit to be '%s'" %
action)
test_tree = Tree.objects.get(pk=test_tree_id)
if action == Audit.Type.PendingApprove:
self.assertEqual(edited_dbh, test_tree.diameter,
"Expected dbh to have been updated on the Tree")
elif action == Audit.Type.PendingReject:
self.assertEqual(original_dbh, test_tree.diameter,
"Expected dbh to NOT have been "
"updated on the Tree")
response_json = loads(response.content)
self.assertTrue('tree' in response_json)
self.assertTrue('dbh' in response_json['tree'])
if action == Audit.Type.PendingApprove:
self.assertEqual(edited_dbh,
response_json['tree']['dbh'],
"Expected dbh to have been updated"
" in the JSON response")
elif action == Audit.Type.PendingReject:
self.assertEqual(original_dbh,
response_json['tree']['dbh'],
"Expected dbh to NOT have been "
"updated in the JSON response")
@skip("waiting for pending integration")
    def test_approve_plot_pending_with_multiple_pending_edits(self):
test_plot = mkPlot(self.instance, self.user)
test_plot.width = 100
test_plot.length = 50
test_plot.save_with_user(self.user)
test_tree = mkTree(self.instance, self.user, plot=test_plot)
test_tree.diameter = 2.3
test_tree.save_with_user(self.user)
updated_values = {
"plot_width": 125,
"plot_length": 25,
"tree": {
"dbh": 3.9
}
}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.id),
updated_values, self.client, self.public_user)
self.assertEqual(response.status_code, 200,
"Non 200 response when updating plot")
updated_values = {
"plot_width": 175,
}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.id),
updated_values,
self.client, self.public_user)
self.assertEqual(response.status_code, 200,
"Non 200 response when updating plot")
test_plot = Plot.objects.get(pk=test_plot.pk)
pending_edit_count = len(test_plot.get_active_pending_audits())
self.assertEqual(3, pending_edit_count,
"Expected three pending edits but got %d" %
pending_edit_count)
pend = test_plot.get_active_pending_audits()[0]
approved_pend_id = pend.id
url = "%s/instance/%s/pending-edits/%d/approve/" % (
API_PFX, self.instance.url_name, approved_pend_id)
response = post_json(url, None, self.client, self.user)
self.assertEqual(response.status_code, 200,
"Non 200 response when approving the pend")
self.assertEqual(1, len(test_plot.get_active_pending_audits()),
"Expected there to be 1 pending edits after approval")
@skip("waiting for normal plot/tree delete integration")
def test_remove_plot(self):
plot = mkPlot(self.instance, self.user)
plot_id = plot.pk
tree = mkTree(self.instance, self.user, plot=plot)
tree_id = tree.pk
url = "%s/instance/%s/plots/%d" % (API_PFX,
self.instance.url_name,
plot_id)
response = self.client.delete(url, **self.user)
self.assertEqual(403, response.status_code,
"Expected 403 when there's still a tree")
tree.delete_with_user(self.user)
response = self.client.delete(url, **self.user)
self.assertEqual(200, response.status_code,
"Expected 200 status code after delete")
response_dict = loads(response.content)
self.assertTrue('ok' in response_dict,
                        'Expected a json object response with an "ok" key')
        self.assertTrue(response_dict['ok'],
                        'Expected a json object response with an "ok" key '
                        'set to True')
plots = Plot.objects.filter(pk=plot_id)
trees = Tree.objects.filter(pk=tree_id)
self.assertTrue(len(plots) == 0, 'Expected plot to be gone')
self.assertTrue(len(trees) == 0, 'Expected tree to be gone')
@skip("waiting for normal plot/tree delete integration")
def test_remove_tree(self):
plot = mkPlot(self.instance, self.user)
plot_id = plot.pk
tree = mkTree(self.instance, self.user, plot=plot)
tree_id = tree.pk
url = "%s/instance/%s/plots/%d/tree" % (API_PFX,
self.instance.url_name,
plot_id)
response = self.client.delete(url, **self.user)
self.assertEqual(200, response.status_code,
"Expected 200 status code after delete")
response_dict = loads(response.content)
self.assertIsNone(response_dict['tree'],
                          'Expected the json response to have a None '
                          'value for the "tree" key after the tree is deleted')
plot = Plot.objects.filter(pk=plot_id)
tree = Tree.objects.filter(pk=tree_id)
self.assertTrue(len(plot) == 1, 'Expected plot to be here')
self.assertTrue(len(tree) == 0, 'Expected tree to be gone')
def test_update_collection_udf(self):
test_plot = mkPlot(self.instance, self.user)
updated_values = {'tree':
{'udf:Stewardship':
[{'Action': 'Watered', 'Date': '2014-01-01'}]},
'plot':
{'udf:Stewardship':
[{'Action': 'Enlarged', 'Date': '2014-02-02'}]}}
response = put_json("%s/instance/%s/plots/%d" %
(API_PFX, self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
response_json = loads(response.content)
updated_plot = Plot.objects.get(pk=test_plot.pk)
new_tree = updated_plot.current_tree()
self.assertEqual(updated_plot.pk, response_json['plot']['id'])
self.assertEqual(new_tree.pk,
response_json['tree']['id'])
self.assertEqual(updated_plot.udfs['Stewardship'][0]['id'],
response_json['plot']['udf:Stewardship'][0]['id'])
self.assertEqual(new_tree.udfs['Stewardship'][0]['id'],
response_json['tree']['udf:Stewardship'][0]['id'])
def _create_multichoice(self):
udf = udf_create({'udf.name': 'multi', 'udf.model': 'Plot',
'udf.type': 'multichoice',
'udf.choices': ['a', 'b', 'c']},
self.instance)
udf_perm = FieldPermission.objects.get(
model_name='Plot',
field_name=udf.canonical_name,
role=self.user.get_role(self.instance),
instance=self.instance)
udf_perm.permission_level = FieldPermission.WRITE_DIRECTLY
udf_perm.save()
def test_update_multichoice_v3(self):
self._create_multichoice()
test_plot = mkPlot(self.instance, self.user)
test_plot.udfs['multi'] = None
test_plot.save_with_user(self.user)
updated_values = {'plot': {
'geom': {'y': 0, 'x': 0, 'srid': 4326}, 'udf:multi': None}
}
response = put_json("/api/v3/instance/%s/plots/%d" %
(self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
response_json = loads(response.content)
self.assertEqual([], response_json['plot']['udf:multi'])
test_plot.udfs['multi'] = ['b', 'c']
test_plot.save_with_user(self.user)
updated_values = {'plot': {
'geom': {'y': 0, 'x': 0, 'srid': 4326}, 'udf:multi': ['b', 'c']}
}
response = put_json("/api/v3/instance/%s/plots/%d" %
(self.instance.url_name, test_plot.pk),
updated_values, self.client, self.user)
self.assertEqual(200, response.status_code)
response_json = loads(response.content)
self.assertEqual(['b', 'c'], response_json['plot']['udf:multi'])
class Instance(LocalMediaTestCase):
test_png_name = '2by2.png'
test_png_path = os.path.join(
os.path.dirname(__file__),
'test_resources', test_png_name)
def setUp(self):
super(Instance, self).setUp()
self.instance = make_instance(is_public=True, point=Point(0, 0))
create_stewardship_udfs(self.instance)
self.user = make_commander_user(instance=self.instance)
udf = udf_create({'udf.name': 'multi', 'udf.model': 'Plot',
'udf.type': 'multichoice',
'udf.choices': ['a', 'b', 'c']},
self.instance)
udf_perm = FieldPermission.objects.get(
model_name='Plot',
field_name=udf.canonical_name,
role=self.user.get_role(self.instance),
instance=self.instance)
udf_perm.permission_level = FieldPermission.WRITE_DIRECTLY
udf_perm.save()
self.expected_scss_variables = {
'primary-color': '123456',
'secondary-color': '987654'
}
self.instance.config['scss_variables'] = self.expected_scss_variables
self.instance.mobile_api_fields = [
{'header': 'Tree Information',
'model': 'tree',
'field_keys': ['tree.species', 'tree.diameter',
'tree.height', 'tree.date_planted']},
{'header': 'Planting Site Information',
'model': 'plot',
'field_keys': ['plot.width', 'plot.length', 'plot.udf:multi']},
{'header': 'Stewardship',
'collection_udf_keys': ['plot.udf:Stewardship',
'tree.udf:Stewardship'],
'sort_key': 'Date'}
]
self.instance.save()
self.instance.logo.save(Instance.test_png_name,
File(open(Instance.test_png_path, 'r')))
def test_returns_config_colors(self):
request = sign_request_as_user(make_request(), self.user)
info = instance_info(request, self.instance)
self.assertEqual(self.expected_scss_variables,
info['config']['scss_variables'])
def test_returns_only_scss_variables(self):
request = sign_request_as_user(make_request(), self.user)
info = instance_info(request, self.instance)
self.assertTrue('scss_variables' in info['config'],
'"scss_variables" should exist in instance.config')
self.assertEqual(1, len(info['config']),
'The config dict should only contain a single, '
'white listed item')
@media_dir
def test_returns_logo(self):
request = sign_request_as_user(make_request(), self.user)
info = instance_info(request, self.instance)
# The actual file name can be changed by the django machinery
# so I test for a fragment that remains the same
self.assertTrue('logos/2by2' in info['logoUrl'])
def test_date_format_conversion(self):
self.instance.date_format = 'F j, Y'
self.instance.short_date_format = 'm/d/Y'
self.instance.save()
request = sign_request_as_user(make_request(), self.user)
info = instance_info(request, self.instance)
self.assertEqual('MM/dd/yyyy', info.get('short_date_format'))
self.assertEqual('MMMM d, yyyy', info.get('date_format'))
def test_removes_unreadable_api_fields(self):
request = sign_request_as_user(make_request(user=self.user), self.user)
info = instance_info(request, self.instance)
self.assertDictContainsSubset(
{'field_keys': ['tree.species', 'tree.diameter', 'tree.height',
'tree.date_planted']},
info.get('field_key_groups')[0])
set_invisible_permissions(self.instance, self.user, 'Tree',
['species', 'diameter', 'height'])
info = instance_info(request, self.instance)
self.assertDictContainsSubset(
{'field_keys': ['tree.date_planted']},
info.get('field_key_groups')[0])
def test_collection_udfs_v3(self):
request = sign_request_as_user(make_request(user=self.user), self.user)
response = instance_info_endpoint(request, 3, self.instance.url_name)
info_dict = json.loads(response.content)
self.assertIn({'header': 'Stewardship',
'collection_udf_keys': ['plot.udf:Stewardship',
'tree.udf:Stewardship'],
'sort_key': 'Date'},
info_dict['field_key_groups'])
def test_collection_udfs_removed_in_v2(self):
request = sign_request_as_user(make_request(user=self.user), self.user)
response = instance_info_endpoint(request, 2, self.instance.url_name)
info_dict = json.loads(response.content)
for field_group in info_dict['field_key_groups']:
self.assertNotIn('collection_udf_keys', field_group)
def test_multichoice_fields_v4(self):
request = sign_request_as_user(make_request(user=self.user), self.user)
response = instance_info_endpoint(request, 4, self.instance.url_name)
info_dict = json.loads(response.content)
self.assertIn('plot.udf:multi', info_dict['fields'].keys())
self.assertTrue(any('plot.udf:multi' in group.get('field_keys', [])
for group in info_dict['field_key_groups']))
def test_multichoice_removed_in_v3(self):
request = sign_request_as_user(make_request(user=self.user), self.user)
response = instance_info_endpoint(request, 3, self.instance.url_name)
info_dict = json.loads(response.content)
self.assertNotIn('plot.udf:multi', info_dict['fields'].keys())
self.assertFalse(any('plot.udf:multi' in group.get('field_keys', [])
for group in info_dict['field_key_groups']))
@override_settings(NEARBY_INSTANCE_RADIUS=2)
@override_settings(FEATURE_BACKEND_FUNCTION=None)
@override_settings(MOBILE_INSTANCES_FUNCTION=None)
class InstancesClosestToPoint(OTMTestCase):
def setUp(self):
self.i1 = make_instance(is_public=True, point=Point(0, 0))
self.i2 = make_instance(is_public=False, point=Point(0, 0))
self.i3 = make_instance(is_public=True, point=Point(0, 9000))
self.i4 = make_instance(is_public=False, point=Point(10000, 0))
self.user = make_commander_user(instance=self.i2)
def test_nearby_list_default(self):
request = sign_request_as_user(make_request(), self.user)
instance_infos = instances_closest_to_point(request, 0, 0)
self.assertEqual(1, len(instance_infos['nearby']))
self.assertEqual(self.i1.pk, instance_infos['nearby'][0]['id'])
self.assertEqual(0, len(instance_infos['personal']))
def test_nearby_list_distance(self):
request = sign_request_as_user(
make_request({'distance': 100000}), self.user)
instance_infos = instances_closest_to_point(request, 0, 0)
self.assertEqual(2, len(instance_infos))
self.assertEqual(self.i1.pk, instance_infos['nearby'][0]['id'])
self.assertEqual(self.i3.pk, instance_infos['nearby'][1]['id'])
self.assertEqual(0, len(instance_infos['personal']))
def test_user_list_default(self):
request = sign_request_as_user(
make_request(user=self.user), self.user)
instance_infos = instances_closest_to_point(request, 0, 0)
self.assertEqual(1, len(instance_infos['nearby']))
self.assertEqual(self.i1.pk, instance_infos['nearby'][0]['id'])
self.assertEqual(1, len(instance_infos['personal']))
self.assertEqual(self.i2.pk, instance_infos['personal'][0]['id'])
def test_user_list_max(self):
request = sign_request_as_user(
make_request({'max': 3, 'distance': 100000}, user=self.user),
self.user)
instance_infos = instances_closest_to_point(request, 0, 0)
self.assertEqual(2, len(instance_infos['nearby']))
self.assertEqual(self.i1.pk, instance_infos['nearby'][0]['id'])
self.assertEqual(self.i3.pk, instance_infos['nearby'][1]['id'])
self.assertEqual(1, len(instance_infos['personal']))
self.assertEqual(self.i2.pk, instance_infos['personal'][0]['id'])
@override_settings(FEATURE_BACKEND_FUNCTION=None)
@override_settings(MOBILE_INSTANCES_FUNCTION=None)
class PublicInstancesTest(OTMTestCase):
def setUp(self):
self.i1 = make_instance(is_public=True, point=Point(0, 0))
self.i2 = make_instance(is_public=False, point=Point(0, 0))
def test_public_list(self):
request = sign_request(make_request())
instance_infos = public_instances(request)
self.assertEqual(1, len(instance_infos))
self.assertEqual(self.i1.pk, instance_infos[0]['id'])
class TreePhotoTest(LocalMediaTestCase):
test_jpeg_path = os.path.join(
os.path.dirname(__file__),
'test_resources', '2by2.jpeg')
test_png_path = os.path.join(
os.path.dirname(__file__),
'test_resources', '2by2.png')
def setUp(self):
super(TreePhotoTest, self).setUp()
self.instance = setupTreemapEnv()
self.user = User.objects.get(username="commander")
self.factory = RequestFactory()
def assertSuccessfulResponse(self, response):
self.assertIsNotNone(response)
self.assertIsNotNone(response.content)
response_dict = loads(response.content)
self.assertTrue('id' in response_dict)
self.assertTrue('thumbnail' in response_dict)
self.assertTrue('image' in response_dict)
def _test_post_photo(self, path):
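        # Posts an image file to the tree-photo endpoint of a plot that has
        # no tree yet and checks that a tree is created to hold the photo.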
plot = mkPlot(self.instance, self.user)
plot_id = plot.pk
self.assertIsNone(plot.current_tree())
url = "%s/instance/%s/plots/%d/tree/photo" % (API_PFX,
self.instance.url_name,
plot_id)
with open(path) as img:
req = self.factory.post(
url, {'name': 'afile', 'file': img})
req = sign_request_as_user(req, self.user)
response = add_photo_endpoint(req, LATEST_API,
self.instance.url_name, plot_id)
plot = Plot.objects.get(pk=plot.pk)
self.assertSuccessfulResponse(response)
self.assertIsNotNone(plot.current_tree())
self.assertEqual(plot.current_tree().treephoto_set.count(), 1)
@media_dir
def test_jpeg_tree_photo_file_name(self):
self._test_post_photo(TreePhotoTest.test_jpeg_path)
@media_dir
def test_png_tree_photo_file_name(self):
self._test_post_photo(TreePhotoTest.test_png_path)
class UserTest(LocalMediaTestCase):
def setUp(self):
super(UserTest, self).setUp()
self.factory = RequestFactory()
self.defaultBaseUserDict = {'organization': 'azavea',
'last_name': 'smith',
'first_name': 'john',
'email': '[email protected]',
'username': 'jsmith',
'password': 'password'}
self.defaultUserDict = {'allow_email_contact': True}
self.defaultUserDict.update(self.defaultBaseUserDict)
def make_post_request(self, datadict):
r = sign_request(make_request(method='POST',
body=dumps(datadict)))
return r
@media_dir
def testUploadPhoto(self):
peon = make_user(username='peon', password='pw')
peon.save()
url = reverse('update_user_photo', kwargs={'version': 3,
'user_id': peon.pk})
with open(TreePhotoTest.test_jpeg_path) as img:
req = self.factory.post(
url, {'name': 'afile', 'file': img})
req = sign_request_as_user(req, peon)
response = update_profile_photo_endpoint(req, LATEST_API,
str(peon.pk))
self.assertEquals(response.status_code, 200)
peon = User.objects.get(pk=peon.pk)
self.assertIsNotNone(peon.photo)
self.assertIsNotNone(peon.thumbnail)
@media_dir
def testCanOnlyUploadAsSelf(self):
# Must do this as yourself
peon = make_user(username='peon', password='pw')
peon.save()
url = reverse('update_user_photo', kwargs={'version': 3,
'user_id': peon.pk})
grunt = make_user(username='grunt', password='pw')
grunt.save()
with open(TreePhotoTest.test_jpeg_path) as img:
req = self.factory.post(
url, {'name': 'afile', 'file': img})
req = sign_request_as_user(req, peon)
response = update_profile_photo_endpoint(req, LATEST_API,
str(grunt.pk))
self.assertEquals(response.status_code, 403)
def testCreateUser(self):
rslt = create_user(self.make_post_request(self.defaultUserDict))
pk = rslt['id']
user = User.objects.get(pk=pk)
for field, target_value in self.defaultUserDict.iteritems():
if field != 'password':
self.assertEqual(getattr(user, field), target_value)
valid_password = user.check_password(self.defaultUserDict['password'])
self.assertEqual(valid_password, True)
def testGetUserVersion2(self):
peon = make_user(username='peon', password='pw')
peon.first_name = 'Puny'
peon.last_name = 'Mortal'
peon.save()
url = reverse('user_info', kwargs={'version': 2})
ret = loads(get_signed(self.client, url, user=peon).content)
self.assertEqual(peon.first_name, ret['firstname'])
self.assertEqual(peon.last_name, ret['lastname'])
def testCreateDuplicateUsername(self):
create_user(self.make_post_request(self.defaultUserDict))
self.defaultUserDict['email'] = '[email protected]'
resp = create_user(self.make_post_request(self.defaultUserDict))
self.assertEqual(resp.status_code, 409)
self.defaultUserDict['username'] = 'jsmith2'
resp = create_user(self.make_post_request(self.defaultUserDict))
self.assertEqual(User.objects.filter(pk=resp['id']).exists(), True)
def testCreateDuplicateEmail(self):
create_user(self.make_post_request(self.defaultUserDict))
self.defaultUserDict['username'] = 'jsmith2'
resp = create_user(self.make_post_request(self.defaultUserDict))
self.assertEqual(resp.status_code, 409)
self.defaultUserDict['email'] = '[email protected]'
resp = create_user(self.make_post_request(self.defaultUserDict))
self.assertEqual(User.objects.filter(pk=resp['id']).exists(), True)
def testMissingFields(self):
del self.defaultUserDict['email']
self.assertRaises(ValidationError,
create_user,
self.make_post_request(self.defaultUserDict))
self.defaultUserDict['email'] = '[email protected]'
resp = create_user(self.make_post_request(self.defaultUserDict))
self.assertIsNotNone(resp['id'])
def testInvalidField(self):
self.defaultUserDict['hardy'] = 'heron'
self.assertRaises(ValidationError,
create_user,
self.make_post_request(self.defaultUserDict))
del self.defaultUserDict['hardy']
resp = create_user(self.make_post_request(self.defaultUserDict))
self.assertIsNotNone(resp['id'])
def test_boolean_defaults_empty(self):
user_id = create_user(
self.make_post_request(self.defaultBaseUserDict))['id']
user = User.objects.get(pk=user_id)
self.assertEqual(user.allow_email_contact, False)
self.assertEqual(user.make_info_public, False)
def test_boolean_defaults_partial(self):
user_id = create_user(
self.make_post_request(self.defaultUserDict))['id']
user = User.objects.get(pk=user_id)
self.assertEqual(user.allow_email_contact, True)
self.assertEqual(user.make_info_public, False)
def testUpdateUserRequiredField(self):
peon = make_user(username='peon', password='pw')
peon.save()
url = reverse('update_user', kwargs={'version': 3,
'user_id': peon.pk})
def updatePeonRequest(d):
return put_json(url, d, self.client, user=peon)
updatePeonRequest({'last_name': 'l1'})
peon = User.objects.get(pk=peon.pk)
self.assertEquals(peon.last_name, 'l1')
updatePeonRequest({'last_name': 'l2'})
peon = User.objects.get(pk=peon.pk)
self.assertEquals(peon.last_name, 'l2')
updatePeonRequest({'password': 'whateva'})
peon = User.objects.get(pk=peon.pk)
valid_password = peon.check_password('whateva')
self.assertTrue(valid_password)
def testUpdateUserNameVersion2(self):
peon = make_user(username='peon', password='pw')
peon.save()
url = reverse('update_user', kwargs={'version': 2,
'user_id': peon.pk})
def updatePeonRequest(d):
return put_json(url, d, self.client, user=peon)
updatePeonRequest({'lastname': 'l1'})
peon = User.objects.get(pk=peon.pk)
self.assertEquals(peon.last_name, 'l1')
updatePeonRequest({'lastname': 'l2'})
peon = User.objects.get(pk=peon.pk)
self.assertEquals(peon.last_name, 'l2')
def testCantRemoveRequiredFields(self):
peon = make_user(username='peon', password='pw')
peon.save()
url = reverse('update_user', kwargs={'version': 3,
'user_id': peon.pk})
resp = put_json(url, {'username': ''},
self.client, user=peon)
self.assertEquals(resp.status_code, 400)
def testCanOnlyUpdateLoggedInUser(self):
peon = make_user(username='peon', password='pw')
peon.save()
grunt = make_user(username='grunt', password='pw')
grunt.save()
url = reverse('update_user', kwargs={'version': 3,
'user_id': peon.pk})
resp = put_json(url, {'password': 'whateva'},
self.client, user=grunt)
self.assertEquals(resp.status_code, 403)
class SigningTest(OTMTestCase):
def setUp(self):
self.factory = RequestFactory()
def process_request_through_url(self, req):
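        # Run the request through the check_signature decorator with an
        # identity view, so a valid signature yields the signed request and
        # an invalid one yields an error HttpResponse.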
return check_signature(
lambda req, *args, **kwargs: req)(req)
def sign_and_send(self, path, secret):
"""
Sign and "send" a request for a given path
If there is an issue this method returns an
HttpResponse object that was generated
If signing is a success this method returns
the authenticated request
`self.assertRequestWasSuccess` will check
the result of this function to make sure that
signing worked
"""
req = self.factory.get(path)
req.META['HTTP_HOST'] = 'testserver.com'
sig = get_signature_for_request(req, secret)
req = self.factory.get('%s&signature=%s' % (path, sig))
req.META['HTTP_HOST'] = 'testserver.com'
resp = self.process_request_through_url(req)
return resp
def assertRequestWasSuccess(self, thing):
# If we got an http request, we're golden
self.assertIsInstance(thing, HttpRequest)
def testAwsExample(self):
# http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
req = self.factory.get('https://elasticmapreduce.amazonaws.com?'
'AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&Action='
'DescribeJobFlows&SignatureMethod=HmacSHA256&'
'SignatureVersion=2&Timestamp='
'2011-10-03T15%3A19%3A30&Version=2009-03-31&'
'Signature='
'i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf%2FMj6vPxy'
'YIs%3')
req.META['HTTP_HOST'] = 'elasticmapreduce.amazonaws.com'
sig = get_signature_for_request(
req, b'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY')
self.assertEquals(
sig, 'i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf/Mj6vPxyYIs=')
def testTimestampVoidsSignature(self):
acred = APIAccessCredential.create()
url = ('http://testserver.com/test/blah?'
'timestamp=%%s&'
'k1=4&k2=a&access_key=%s' % acred.access_key)
curtime = datetime.datetime.now()
invalid = curtime - datetime.timedelta(minutes=100)
req = self.sign_and_send(url % invalid.strftime(SIG_TIMESTAMP_FORMAT),
acred.secret_key)
self.assertEqual(req.status_code, 400)
timestamp = curtime.strftime(SIG_TIMESTAMP_FORMAT)
req = self.sign_and_send(url % timestamp, acred.secret_key)
self.assertRequestWasSuccess(req)
def testPOSTBodyChangesSig(self):
url = "%s/i/plots/1/tree/photo" % API_PFX
def get_sig(path):
with open(path) as img:
req = self.factory.post(
url, {'name': 'afile', 'file': img})
req = sign_request(req)
return req.REQUEST['signature']
sig1 = get_sig(TreePhotoTest.test_png_path)
sig2 = get_sig(TreePhotoTest.test_jpeg_path)
self.assertNotEqual(sig1, sig2)
def testChangingUrlChangesSig(self):
path = 'http://testserver.com/test/blah?access_key=abc'
req = self.factory.get(path)
req.META['HTTP_HOST'] = 'testserver.com'
sig1 = get_signature_for_request(req, b'secret')
path = 'http://testserver.com/test/blah?access_key=abd'
req = self.factory.get(path)
req.META['HTTP_HOST'] = 'testserver.com'
sig2 = get_signature_for_request(req, b'secret')
self.assertNotEqual(sig1, sig2)
def testChangingSecretChangesKey(self):
path = 'http://testserver.com/test/blah?access_key=abc'
req = self.factory.get(path)
req.META['HTTP_HOST'] = 'testserver.com'
sig1 = get_signature_for_request(req, b'secret1')
sig2 = get_signature_for_request(req, b'secret2')
self.assertNotEqual(sig1, sig2)
def testMalformedTimestamp(self):
acred = APIAccessCredential.create()
timestamp = datetime.datetime.now().strftime(SIG_TIMESTAMP_FORMAT)
url = ('http://testserver.com/test/blah?'
'timestamp=%%s&'
'k1=4&k2=a&access_key=%s' % acred.access_key)
req = self.sign_and_send(url % ('%sFAIL' % timestamp),
acred.secret_key)
self.assertEqual(req.status_code, 400)
req = self.sign_and_send(url % timestamp, acred.secret_key)
self.assertRequestWasSuccess(req)
def testMissingAccessKey(self):
acred = APIAccessCredential.create()
timestamp = datetime.datetime.now().strftime(SIG_TIMESTAMP_FORMAT)
url = ('http://testserver.com/test/blah?'
'timestamp=%s&'
'k1=4&k2=a' % timestamp)
req = self.sign_and_send(url, acred.secret_key)
self.assertEqual(req.status_code, 400)
req = self.sign_and_send('%s&access_key=%s' % (url, acred.access_key),
acred.secret_key)
self.assertRequestWasSuccess(req)
def testAuthenticatesAsUser(self):
peon = make_user(username='peon', password='pw')
peon.save()
acred = APIAccessCredential.create(user=peon)
timestamp = datetime.datetime.now().strftime(SIG_TIMESTAMP_FORMAT)
req = self.sign_and_send('http://testserver.com/test/blah?'
'timestamp=%s&'
'k1=4&k2=a&access_key=%s' %
(timestamp, acred.access_key),
acred.secret_key)
self.assertEqual(req.user.pk, peon.pk)
class Authentication(OTMTestCase):
def setUp(self):
self.instance = setupTreemapEnv()
self.jim = User.objects.get(username="jim")
def test_401(self):
ret = get_signed(self.client, "%s/user" % API_PFX)
self.assertEqual(ret.status_code, 401)
def test_ok(self):
auth = base64.b64encode("jim:password")
withauth = {"HTTP_AUTHORIZATION": "Basic %s" % auth}
ret = get_signed(self.client, "%s/user" % API_PFX, **withauth)
self.assertEqual(ret.status_code, 200)
def test_malformed_auth(self):
withauth = {"HTTP_AUTHORIZATION": "FUUBAR"}
ret = get_signed(self.client, "%s/user" % API_PFX, **withauth)
self.assertEqual(ret.status_code, 401)
auth = base64.b64encode("foobar")
withauth = {"HTTP_AUTHORIZATION": "Basic %s" % auth}
ret = get_signed(self.client, "%s/user" % API_PFX, **withauth)
self.assertEqual(ret.status_code, 401)
def test_bad_cred(self):
auth = base64.b64encode("jim:passwordz")
withauth = {"HTTP_AUTHORIZATION": "Basic %s" % auth}
ret = get_signed(self.client, "%s/user" % API_PFX, **withauth)
self.assertEqual(ret.status_code, 401)
@skip("We can't return reputation until login takes an instance")
def test_user_has_rep(self):
ijim = self.jim.get_instance_user(self.instance)
ijim.reputation = 1001
ijim.save()
auth = base64.b64encode("jim:password")
withauth = dict(self.sign.items() +
[("HTTP_AUTHORIZATION", "Basic %s" % auth)])
ret = self.client.get("%s/user" % API_PFX, **withauth)
self.assertEqual(ret.status_code, 200)
content = loads(ret.content)
self.assertEqual(content['username'], self.jim.username)
self.assertEqual(content['status'], 'success')
self.assertEqual(content['reputation'], 1001)
class UserApiExportsTest(UserExportsTestCase):
def _test_requires_admin_access(self, endpoint_name):
url = reverse('user_csv',
kwargs={'version': 3,
'instance_url_name': self.instance.url_name})
iuser = self.user1.get_instance_user(self.instance)
iuser.admin = False
iuser.save_with_user(iuser)
resp = get_signed(self.client, url, user=self.user1)
self.assertEquals(resp.status_code, 403)
iuser.admin = True
iuser.save_with_user(self.user1)
resp = get_signed(self.client, url, user=self.user1)
self.assertEquals(resp.status_code, 200)
iuser.delete_with_user(self.user1)
resp = get_signed(self.client, url, user=self.user1)
self.assertEquals(resp.status_code, 401)
def test_csv_requires_admin(self):
self._test_requires_admin_access('users_csv')
def test_json_requires_admin(self):
self._test_requires_admin_access('users_json')
class PasswordResetTest(OTMTestCase):
def setUp(self):
self.instance = setupTreemapEnv()
self.jim = User.objects.get(username="jim")
def test_send_password_reset_email_url(self):
url = "%s/send-password-reset-email?email=%s"
response = post_json(url % (API_PFX, self.jim.email),
{}, self.client, None)
self.assertEquals(response.status_code, 200)
class SpeciesListTest(OTMTestCase):
def setUp(self):
self.instance = setupTreemapEnv()
self.user = User.objects.get(username="jim")
def test_species_list_endpoint(self):
response = get_signed(
self.client,
"%s/instance/%s/species" % (API_PFX,
self.instance.url_name),
user=self.user)
self.assertEqual(response.status_code, 200)
| agpl-3.0 | 5,247,549,353,910,923,000 | 36.060043 | 79 | 0.577071 | false |
danvk/webdiff | webdiff/dirdiff.py | 1 | 2757 | '''Compute the diff between two directories on local disk.'''
from collections import defaultdict
import copy
import os
from webdiff.localfilediff import LocalFileDiff
from webdiff import util
def diff(a_dir, b_dir):
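    # Pair files under a_dir and b_dir and wrap every pair in a
    # LocalFileDiff; the trailing boolean marks pairs that find_moves
    # detected as moves (matched by content hash).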
pairs = find_diff(a_dir, b_dir)
moves, pairs = find_moves(pairs)
diffs = [LocalFileDiff(a_dir, a, b_dir, b, False) for a, b in pairs] + [
LocalFileDiff(a_dir, a, b_dir, b, True) for a, b in moves
]
# sort "change" before "delete" in a move, which is easier to understand.
diffs.sort(key=lambda d: (d.a_path, 0 if d.b else 1))
return diffs
def find_diff(a, b):
"""Walk directories a and b and pair off files.
Returns a list of pairs of full paths to matched a/b files.
"""
def list_files(top_dir):
file_list = []
for root, _, files in os.walk(top_dir):
root = os.path.relpath(root, start=top_dir)
for name in files:
file_list.append(os.path.join(root, name))
return file_list
assert os.path.isdir(a)
assert os.path.isdir(b)
a_files = list_files(a)
b_files = list_files(b)
pairs = pair_files(a_files, b_files)
def safejoin(d, p):
if p == '':
return ''
return os.path.join(d, p)
return [(safejoin(a, arel), safejoin(b, brel)) for arel, brel in pairs]
def pair_files(a_files, b_files):
'''Paths must be relative to the diff root for each side.'''
pairs = []
for f in a_files[:]:
if f in b_files:
i = a_files.index(f)
j = b_files.index(f)
pairs.append((f, f))
del a_files[i]
del b_files[j]
else:
pairs.append((f, '')) # delete
for f in b_files:
pairs.append(('', f)) # add
return pairs
def find_moves(pairs):
"""Separate the file move pairs from other file pairs"""
# If a file is just moved, then the added file and the deleted file
# will both put their idx into the same key of the dictionary
add_delete_pairs = defaultdict(lambda: [None, None])
for idx, (a, b) in enumerate(pairs):
if b and not a: # add
add_delete_pairs[util.contentHash(b)][1] = idx
elif a and not b: # delete
add_delete_pairs[util.contentHash(a)][0] = idx
indices_to_omit = []
moves = []
for _, (aIdx, bIdx) in add_delete_pairs.items():
if (aIdx is not None) and (bIdx is not None):
# replace the "add" and "delete" with a "change"
indices_to_omit.extend([aIdx, bIdx])
moves.append((pairs[aIdx][0], pairs[bIdx][1]))
remaining_pairs = [pair for i, pair in enumerate(pairs) if i not in indices_to_omit]
return moves, remaining_pairs
| apache-2.0 | -6,129,527,204,904,868,000 | 28.021053 | 88 | 0.587595 | false |
XiaoTaoWang/TADLib | tadlib/visualize/heatmaps.py | 1 | 7770 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 19:38:21 2018
@author: XiaoTao Wang
"""
import itertools, cooler
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
class Triangle(object):
def __init__(self, uri, chrom, start, end, correct='weight', figsize=(7, 3.5)):
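        # Load the (optionally balanced) contact matrix for chrom:start-end
        # from the cooler file at `uri`; NaNs are zeroed before plotting.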
self.clr = cooler.Cooler(uri)
self.res = self.clr.binsize
fig = plt.figure(figsize=figsize)
self.fig = fig
self.chrom = chrom
self.start = start
self.end = end
M = self.clr.matrix(balance=correct, sparse=False).fetch((chrom, start, end))
M[np.isnan(M)] = 0
self.matrix = M
# define my colormap (traditional w --> r)
self.cmap = LinearSegmentedColormap.from_list('interaction',
['#FFFFFF','#FFDFDF','#FF7575','#FF2626','#F70000'])
def print_coordinate(self, pos):
i_part = int(pos) // 1000000 # Integer Part
d_part = (int(pos) % 1000000) // 1000 # Decimal Part
if (i_part > 0) and (d_part > 0):
return ''.join([str(i_part), 'M', str(d_part), 'K'])
elif (i_part == 0):
return ''.join([str(d_part), 'K'])
else:
return ''.join([str(i_part), 'M'])
def matrix_plot(self, colormap='traditional', vmin=None, vmax=None, cbr_fontsize=9,
nticks=4, label_size=9, remove_label=False, heatmap_pos=[0.1, 0.1, 0.8, 0.8],
colorbar_pos=[0.08, 0.45, 0.02, 0.15], chrom_pos=[0.1, 0.08, 0.8, 0.015]):
h_ax = self.fig.add_axes(heatmap_pos)
c_ax = self.fig.add_axes(colorbar_pos)
M = self.matrix
n = M.shape[0]
# Create the rotation matrix
t = np.array([[1,0.5], [-1,0.5]])
A = np.dot(np.array([(i[1],i[0]) for i in itertools.product(range(n,-1,-1),range(0,n+1,1))]),t)
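        # A holds the grid-corner coordinates sheared/rotated by 45 degrees,
        # which lets pcolormesh draw the square matrix as a triangle view.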
if colormap=='traditional':
cmap = self.cmap
else:
cmap = colormap
# Plot the Heatmap ...
x = A[:,1].reshape(n+1, n+1)
y = A[:,0].reshape(n+1, n+1)
y[y<0] = -y[y<0]
if vmax is None:
vmax = np.percentile(M[M.nonzero()], 95)
if vmin is None:
vmin = M.min()
sc = h_ax.pcolormesh(x, y, np.flipud(M), vmin=vmin, vmax=vmax, cmap=cmap,
edgecolor='none', snap=True, linewidth=.001)
# colorbar
cbar = self.fig.colorbar(sc, cax=c_ax, ticks=[vmin, vmax], format='%.3g')
c_ax.tick_params(labelsize=cbr_fontsize)
# Hide the bottom part
xmin = A[:,1].min()
xmax = A[:,1].max()
ymin = A[:,0].min()
ymax = 0
h_ax.fill([xmin, xmax, xmax, xmin], [ymin, ymin, ymax, ymax], 'w', ec='none')
h_ax.axis('off')
# chromosome bar
if not remove_label:
chrom_ax = self.fig.add_axes(chrom_pos)
chrom_ax.tick_params(axis='both', bottom=True, top=False, left=False,
right=False, labelbottom=True, labeltop=False,
labelleft=False, labelright=False)
interval = (self.end - self.start) // self.res
ticks = list(np.linspace(0, interval, nticks).astype(int))
pos = list(np.linspace(self.start, self.end, nticks).astype(int))
labels = [self.print_coordinate(p) for p in pos]
chrom_ax.set_xticks(ticks)
if len(ticks) < 7:
chrom_ax.set_xticklabels(labels, fontsize=label_size)
else:
chrom_ax.set_xticklabels(labels, fontsize=label_size, rotation=15, ha='right')
chrom_ax.set_xlabel('chr'+self.chrom.lstrip('chr'), fontsize=label_size+2)
chrom_ax.set_xlim(ticks[0], ticks[-1])
chrom_ax.set_ylim(0, 0.02)
self.heatmap_ax = h_ax
self.cbar_ax = c_ax
self.chrom_ax = chrom_ax
self.hx = x
self.hy = y
def plot_loops(self, loop_file, marker_size=50, marker_color='#111111', marker_type='o',
marker_alpha=0.5):
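        # Loop anchors are read from a tab-separated file; columns 0,1,2,4,5
        # are taken as chrom, start1, end1, start2, end2 (column 3,
        # presumably the second chromosome, is skipped).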
loopType = np.dtype({'names':['chr','start1','end1','start2','end2'],
'formats':['U5', np.int, np.int, np.int, np.int]})
loops = np.loadtxt(loop_file, dtype=loopType, usecols=[0,1,2,4,5])
loops = loops[(loops['chr']==self.chrom)]
test_x = loops['start1']
test_y = loops['end2']
mask = (test_x >= self.start) & (test_y < self.end)
loops = loops[mask]
n = self.matrix.shape[0]
# mark the loop loci
Bool = np.zeros((n, n), dtype=bool)
for xs, xe, ys, ye in zip(loops['start1'], loops['end1'], loops['start2'], loops['end2']):
# Lodate the loop pixel at given resolution
s_l = range(xs//self.res-1, int(np.ceil(xe/float(self.res)))+1)
e_l = range(ys//self.res-1, int(np.ceil(ye/float(self.res)))+1)
si, ei = None, None
for i in s_l:
for j in e_l:
st = i - self.start//self.res
et = j - self.start//self.res
if (st < n) and (et < n):
if si is None:
si, ei = st, et
else:
if self.matrix[st,et] > self.matrix[si,ei]:
si, ei = st, et
if not si is None:
Bool[si, ei] = 1
#Bool[ei, si] = 1
lx = self.hx[:-1,:-1][np.flipud(Bool)]
ly = self.hy[:-1,:-1][np.flipud(Bool)] + 1
if lx.size > 0:
self.heatmap_ax.scatter(lx, ly, s=marker_size, c=marker_color, marker=marker_type,
alpha=marker_alpha)
self.heatmap_ax.set_xlim(self.hx.min(), self.hx.max())
self.heatmap_ax.set_ylim(self.hy.min(), self.hy.max())
self.loops = loops
def plot_TAD(self, tad_fil, line_color='#60636A', linewidth=3, line_style='-'):
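        # TAD calls are read from the first three columns (chrom, start, end)
        # of `tad_fil` and drawn as triangle outlines over the heatmap.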
tadtype = np.dtype({'names':['chr','start','end'],
'formats':['U5', np.int, np.int]})
tads = np.loadtxt(tad_fil, dtype=tadtype, usecols=[0,1,2])
tads = tads[(tads['chr']==self.chrom)]
mask = (tads['end'] > self.start) & (tads['start'] < self.end)
tads = tads[mask]
n = self.matrix.shape[0]
for s, e in zip(tads['start'], tads['end']):
si = s // self.res - self.start // self.res
ei = e // self.res - self.start // self.res
if si < 0:
si = 0
if ei > n - 1:
ei = n - 1
if ei - si < 2:
continue
x = [self.hx[:-1, :-1][n-1-si, si],
self.hx[:-1, :-1][n-1-si, ei],
self.hx[:-1, :-1][n-1-ei, ei]]
y = [self.hy[:-1, :-1][n-1-si, si] - 1,
self.hy[:-1, :-1][n-1-si, ei] + 1,
self.hy[:-1, :-1][n-1-ei, ei] - 1]
self.heatmap_ax.plot(x, y, color=line_color, linestyle=line_style,
linewidth=linewidth)
self.heatmap_ax.set_xlim(self.hx.min(), self.hx.max())
self.heatmap_ax.set_ylim(self.hy.min(), self.hy.max())
self.tads = tads
def outfig(self, outfile, dpi=200, bbox_inches='tight'):
self.fig.savefig(outfile, dpi=dpi, bbox_inches=bbox_inches)
def show(self):
self.fig.show()
| gpl-3.0 | 8,681,426,830,051,000,000 | 34.824645 | 103 | 0.484299 | false |
elssar/calibre | src/calibre/gui2/preferences/emailp.py | 14 | 10118 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import textwrap
from PyQt5.Qt import QAbstractTableModel, QFont, Qt
from calibre.gui2.preferences import ConfigWidgetBase, test_widget, \
AbortCommit
from calibre.gui2.preferences.email_ui import Ui_Form
from calibre.utils.config import ConfigProxy
from calibre.utils.icu import numeric_sort_key
from calibre.gui2 import gprefs
from calibre.utils.smtp import config as smtp_prefs
class EmailAccounts(QAbstractTableModel): # {{{
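    # Table model behind the email-accounts list in the Sharing-by-email
    # preferences: columns are address, formats, subject, auto-send and alias.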
def __init__(self, accounts, subjects, aliases={}):
QAbstractTableModel.__init__(self)
self.accounts = accounts
self.subjects = subjects
self.aliases = aliases
self.sorted_on = (0, True)
self.account_order = self.accounts.keys()
self.do_sort()
self.headers = map(unicode, [_('Email'), _('Formats'), _('Subject'),
_('Auto send'), _('Alias')])
self.default_font = QFont()
self.default_font.setBold(True)
self.default_font = (self.default_font)
self.tooltips =[None] + list(map(unicode, map(textwrap.fill,
[_('Formats to email. The first matching format will be sent.'),
_('Subject of the email to use when sending. When left blank '
'the title will be used for the subject. Also, the same '
'templates used for "Save to disk" such as {title} and '
'{author_sort} can be used here.'),
'<p>'+_('If checked, downloaded news will be automatically '
'mailed <br>to this email address '
'(provided it is in one of the listed formats).'),
_('Friendly name to use for this email address')
])))
def do_sort(self):
col = self.sorted_on[0]
if col == 0:
def key(account_key):
return numeric_sort_key(account_key)
elif col == 1:
def key(account_key):
return numeric_sort_key(self.accounts[account_key][0] or '')
elif col == 2:
def key(account_key):
return numeric_sort_key(self.subjects.get(account_key) or '')
elif col == 3:
def key(account_key):
return numeric_sort_key(type(u'')(self.accounts[account_key][0]) or '')
elif col == 4:
def key(account_key):
return numeric_sort_key(self.aliases.get(account_key) or '')
self.account_order.sort(key=key, reverse=not self.sorted_on[1])
def sort(self, column, order=Qt.AscendingOrder):
nsort = (column, order == Qt.AscendingOrder)
if nsort != self.sorted_on:
self.sorted_on = nsort
self.beginResetModel()
try:
self.do_sort()
finally:
self.endResetModel()
def rowCount(self, *args):
return len(self.account_order)
def columnCount(self, *args):
return len(self.headers)
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
return self.headers[section]
return None
def data(self, index, role):
row, col = index.row(), index.column()
if row < 0 or row >= self.rowCount():
return None
account = self.account_order[row]
if account not in self.accounts:
return None
if role == Qt.UserRole:
return (account, self.accounts[account])
if role == Qt.ToolTipRole:
return self.tooltips[col]
if role in [Qt.DisplayRole, Qt.EditRole]:
if col == 0:
return (account)
if col == 1:
return (self.accounts[account][0])
if col == 2:
return (self.subjects.get(account, ''))
if col == 4:
return (self.aliases.get(account, ''))
if role == Qt.FontRole and self.accounts[account][2]:
return self.default_font
if role == Qt.CheckStateRole and col == 3:
return (Qt.Checked if self.accounts[account][1] else Qt.Unchecked)
return None
def flags(self, index):
if index.column() == 3:
return QAbstractTableModel.flags(self, index)|Qt.ItemIsUserCheckable
else:
return QAbstractTableModel.flags(self, index)|Qt.ItemIsEditable
def setData(self, index, value, role):
if not index.isValid():
return False
row, col = index.row(), index.column()
account = self.account_order[row]
if col == 3:
self.accounts[account][1] ^= True
elif col == 2:
self.subjects[account] = unicode(value or '')
elif col == 4:
self.aliases.pop(account, None)
aval = unicode(value or '').strip()
if aval:
self.aliases[account] = aval
elif col == 1:
self.accounts[account][0] = unicode(value or '').upper()
elif col == 0:
na = unicode(value or '')
from email.utils import parseaddr
addr = parseaddr(na)[-1]
if not addr:
return False
self.accounts[na] = self.accounts.pop(account)
self.account_order[row] = na
if '@kindle.com' in addr:
self.accounts[na][0] = 'AZW, MOBI, TPZ, PRC, AZW1'
self.dataChanged.emit(
self.index(index.row(), 0), self.index(index.row(), 3))
return True
def make_default(self, index):
if index.isValid():
self.beginResetModel()
row = index.row()
for x in self.accounts.values():
x[2] = False
self.accounts[self.account_order[row]][2] = True
self.endResetModel()
def add(self):
x = _('new email address')
y = x
c = 0
while y in self.accounts:
c += 1
y = x + str(c)
auto_send = len(self.accounts) < 1
self.beginResetModel()
self.accounts[y] = ['MOBI, EPUB', auto_send,
len(self.account_order) == 0]
self.account_order = self.accounts.keys()
self.do_sort()
self.endResetModel()
return self.index(self.account_order.index(y), 0)
def remove(self, index):
if index.isValid():
row = index.row()
account = self.account_order[row]
self.accounts.pop(account)
self.account_order = sorted(self.accounts.keys())
has_default = False
for account in self.account_order:
if self.accounts[account][2]:
has_default = True
break
if not has_default and self.account_order:
self.accounts[self.account_order[0]][2] = True
self.beginResetModel()
self.endResetModel()
# }}}
class ConfigWidget(ConfigWidgetBase, Ui_Form):
supports_restoring_to_defaults = False
def genesis(self, gui):
self.gui = gui
self.proxy = ConfigProxy(smtp_prefs())
r = self.register
r('add_comments_to_email', gprefs)
self.send_email_widget.initialize(self.preferred_to_address)
self.send_email_widget.changed_signal.connect(self.changed_signal.emit)
opts = self.send_email_widget.smtp_opts
self._email_accounts = EmailAccounts(opts.accounts, opts.subjects,
opts.aliases)
self._email_accounts.dataChanged.connect(lambda x,y:
self.changed_signal.emit())
self.email_view.setModel(self._email_accounts)
self.email_view.sortByColumn(0, Qt.AscendingOrder)
self.email_view.setSortingEnabled(True)
self.email_add.clicked.connect(self.add_email_account)
self.email_make_default.clicked.connect(self.make_default)
self.email_view.resizeColumnsToContents()
self.email_remove.clicked.connect(self.remove_email_account)
def preferred_to_address(self):
if self._email_accounts.account_order:
return self._email_accounts.account_order[0]
def initialize(self):
ConfigWidgetBase.initialize(self)
# Initializing all done in genesis
def restore_defaults(self):
ConfigWidgetBase.restore_defaults(self)
# No defaults to restore to
def commit(self):
if self.email_view.state() == self.email_view.EditingState:
# Ensure that the cell being edited is committed by switching focus
# to some other widget, which automatically closes the open editor
self.send_email_widget.setFocus(Qt.OtherFocusReason)
to_set = bool(self._email_accounts.accounts)
if not self.send_email_widget.set_email_settings(to_set):
raise AbortCommit('abort')
self.proxy['accounts'] = self._email_accounts.accounts
self.proxy['subjects'] = self._email_accounts.subjects
self.proxy['aliases'] = self._email_accounts.aliases
return ConfigWidgetBase.commit(self)
def make_default(self, *args):
self._email_accounts.make_default(self.email_view.currentIndex())
self.changed_signal.emit()
def add_email_account(self, *args):
index = self._email_accounts.add()
self.email_view.setCurrentIndex(index)
self.email_view.resizeColumnsToContents()
self.email_view.edit(index)
self.changed_signal.emit()
def remove_email_account(self, *args):
idx = self.email_view.currentIndex()
self._email_accounts.remove(idx)
self.changed_signal.emit()
def refresh_gui(self, gui):
from calibre.gui2.email import gui_sendmail
gui_sendmail.calculate_rate_limit()
if __name__ == '__main__':
from PyQt5.Qt import QApplication
app = QApplication([])
test_widget('Sharing', 'Email')
| gpl-3.0 | -2,747,201,342,243,837,000 | 36.198529 | 87 | 0.581933 | false |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/account_transfer_advance/wizard/__init__.py | 7 | 1514 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import purchase_advance_transfer
import sale_advance_transfer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,669,471,566,412,868,600 | 44.878788 | 78 | 0.690885 | false |
jroivas/odt-html5 | odt.py | 1 | 23990 | import zipfile
import os
import xml.etree.ElementTree as etree
import re
import copy
class ODTPage:
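    """Renders an ODT document as a set of standalone HTML pages with prev/next navigation plus a generated index page."""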
def __init__(self, name, odt=None, pagename='page', indexname='index'):
self.pagename = pagename
self.indexname = indexname
if odt is None:
self.odt = ODT(name, pagename=pagename)
else:
self.odt = odt
self.index = []
def pages(self):
return self.odt.pageCount()
def getTitle(self):
for i in self.odt.titles:
return (i, self.odt.titles[i][0])
return (0, '')
def solveCurrentPage(self, page):
pages = self.odt.pageCount()
if page > pages:
page = pages
if page < 1:
page = 1
return page
def handlePageTitle(self, page):
(level, page_title) = self.getTitle()
if page_title:
self.index.append((level, page, page_title))
return page_title
def getPage(self, page=1, title="ODT", prev_page=True):
self.odt.reset()
styles = ''
page = self.solveCurrentPage(page)
content = self.getContent(self.odt, page)
body = self.getBody(self.odt, page, content, prev_page, title)
page_title = self.handlePageTitle(page)
if page_title is not None:
title += ' - ' + page_title
head = self.getHeader(title, styles)
foot = self.getFooter()
return page_title, content, head + body + foot
def genIndex(self, title, extra):
res = '<body>\n'
res += extra
res += '<div class="page">\n'
for level, page, target in self.index:
res += '<div>%s<a href="%s_%s.html">%s</a></div>\n' % (' ' * 2 * int(level), self.pagename, page, target)
res += '</div>\n'
res += '</body>\n'
head = self.getHeader(title, '')
foot = self.getFooter()
return head + res + foot
def getHeader(self, title, extra=""):
return """<html>
<head>
<title>%s</title>
<link rel="stylesheet" type="text/css" title="styles" href="odt.css"/>
<meta charset="UTF-8">
<script type="text/javascript" src="jquery.min.js"></script>
<script type="text/javascript" src="odt.js"></script>
%s
</head>
""" % (title, extra)
def getContent(self, odt, page):
res = odt.read()
tmp = ''
if not res:
return "<p>Invalid file</p>"
tmp = odt.parseContent(page=page)
return """
<!-- START -->
<div class="page">
%s
</div>
<!-- END -->
""" % (''.join(tmp))
def getBody(self, odt, page, content, prev_page, title):
cntx = ''
cntx += '<a href="%s.html"><div id="top_left">%s</div></a>\n' % (self.indexname, title)
if prev_page and page > 1:
if prev_page == True:
prev_page = "%s_%s.html" % (self.pagename, page - 1)
cntx += """
<!-- PREV --><a href="%s">
<div id='prevPage'>
                &lt;&lt;
</div></a>
""" % (prev_page)
cntx += """
<input type='hidden' id='pagenum' name='pagenum' value='%s'></input>
<input type='hidden' id='pagecnt' name='pagecnt' value='%s'></input>
""" % (page, odt.pageCount())
cntx += "<div id='pageDiv'>\n"
cntx += content
cntx += "</div>\n"
if page < odt.pageCount():
cntx += """
<!-- NEXT --><a href="%s_%s.html">
<div id='nextPage'>
>>
</div>
</a>
""" % (self.pagename, page + 1)
return """
<body>
%s
</body>
""" % (cntx)
def getFooter(self):
return """</html>"""
class ODT:
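    """Thin ODT reader: unzips the .odt file, parses styles.xml and content.xml, and converts the content of a given page to HTML."""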
def __init__(self, name, pagename):
self._name = name
self._pagename = pagename
self._page = 1
self._zip = None
self._styles = {}
self._styles_xml = None
self._content_xml = None
self._stylename = None
self._read = False
self._read = self.read()
self._pagecnt = None
self._lists = {}
self._hlevels = {}
self._localtargets = {}
self._framedata = None
self._listname = None
self._tab = None
self._stylestack = []
self._imageframe1 = ''
self._imageframe1_end = ''
self._imageframe2 = ''
self._imageframe2_end = ''
self.images = []
self.titles = {}
#self._pagedata = {}
self.rendered_width = 0
def reset(self):
self.titles = {}
self._page = 1
def open(self):
if not os.path.isfile(self._name):
self._zip = None
return False
try:
self._zip = zipfile.ZipFile(self._name, 'r')
except zipfile.BadZipfile:
self._zip = None
return False
return True
def close(self):
self._zip.close()
def extract(self, file):
if self._zip == None:
return None
try:
return self._zip.read(file)
except KeyError:
return None
def cleanTag(self, tag):
return re.sub("{[^}]+}","",tag).strip()
def findElement(self, root, name):
res = []
#if self.cleanTag(root.tag) == name:
if root.tag == name:
res.append(root)
for child in root:
if child.tag == name:
res.append(child)
tmp = self.findElement(child, name)
for item in tmp:
if item not in res:
res.append(item)
return res
def parseStyleTag(self, styles):
res = {}
for style in styles:
tmp = self.getAttrib(style, "name")
if tmp is not None:
res[tmp] = {}
self._stylename = tmp
elif self._stylename not in res:
res[self._stylename] = {}
pstyle = self.getAttrib(style, "parent-style-name")
if pstyle is not None and res is not None:
res[self._stylename]["parent"] = pstyle
text_prop = self.parseTextProperties(style)
if text_prop:
res[self._stylename]["text-prop"] = text_prop
para_prop = self.parseParagraphProperties(style)
if para_prop:
res[self._stylename]["para-prop"] = para_prop
return res
def filterAttributes(self, props, keep):
res = []
for prop in props:
style = {}
for val in prop.attrib:
if val in keep:
style[val] = prop.attrib[val]
if style:
res.append(style)
if len(res) == 1:
return res[0]
return res
def parseTextPropertyTag(self, props):
valid_text_attrs = ["font-size", "color", "background-color", "font-weight",
"font-style", "text-underline-style", "text-underline-color",
"text-overline-style", "text-line-through-style" ]
return self.filterAttributes(props, valid_text_attrs)
def parseParagraphPropertyTag(self, props):
valid_para_attrs = [ "break-before", "text-align", "color", "background-color",
"text-indent", "margin-left", "margin-right", "margin-top", "margin-bottom" ]
return self.filterAttributes(props, valid_para_attrs)
def getAttrib(self, tag, name):
for attrib in tag.attrib:
#if self.cleanTag(attrib)==name:
if attrib == name:
return tag.attrib[attrib]
return None
def stripNamespace(self, root):
for el in root.getiterator():
if el.tag[0] == "{":
el.tag = el.tag.split('}', 1)[1]
tmp = {}
for attr in el.attrib:
if attr[0] == "{":
tmp[attr.split('}', 1)[1]] = el.attrib[attr]
else:
tmp[attr] = el.attrib[attr]
el.attrib = tmp
def parseStyleXML(self):
if self._styles_xml == None:
return None
self._style_root = etree.fromstring(self._styles_xml)
self.stripNamespace(self._style_root)
def parseContentXML(self):
if self._content_xml == None:
return None
self._content_root = etree.fromstring(self._content_xml)
self.stripNamespace(self._content_root)
def parseXML(self):
self.parseStyleXML()
self.parseContentXML()
def parseParagraphProperties(self, item=None):
if item is None:
item = self._style_root
tags = self.findElement(item, "paragraph-properties")
return self.parseParagraphPropertyTag(tags)
def parseTextProperties(self, item=None):
if item is None:
item = self._style_root
tags = self.findElement(item, "text-properties")
return self.parseTextPropertyTag(tags)
def parseStyles(self):
styles = self.findElement(self._style_root, "style")
return self.parseStyleTag(styles)
def getAttrib(self, item, attr):
if not attr in item.attrib:
return None
return item.attrib[attr]
def parseContent(self, page=0):
return self.parseTag(self._content_root, page=page),
def parseStyle(self, style, item):
res = ""
extra = False
got_tab = False
if item == "tab" and "tab" in style:
if style["tab"]["pos"] is not None:
res += "margin-left: %s;" % (style["tab"]["pos"])
got_tab = True
if "text-prop" in style:
for key in style["text-prop"]:
if extra:
res += " "
extra = True
if key == "text-underline-style":
res += "text-decoration: underline;"
elif key == "text-overline-style":
res += "text-decoration: overline;"
elif key == "text-line-through-style":
res += "text-decoration: line-through;"
elif not got_tab or key != "margin-left":
res += "%s: %s;" % (key, style["text-prop"][key].strip())
if "para-prop" in style:
for key in style["para-prop"]:
if extra:
res += " "
extra = True
if key == "text-indent":
res += "padding-left: %s;" % (style["para-prop"][key].strip())
elif key == "break-before":
pass
elif not got_tab or key != "margin-left":
res += "%s: %s;" % (key, style["para-prop"][key].strip())
"""
if item == "tab" and "tab" in style:
if style["tab"]["pos"] is not None:
res += "margin-left: %s;" % (style["tab"]["pos"])
"""
return res
def isBreak(self, style):
if style is None:
return False
if not "para-prop" in style:
return False
if "break-before" in style["para-prop"] and style["para-prop"]["break-before"] == "page":
return True
return False
def isInternalLink(self, link):
if link[0] == "#":
return True
return False
def parseInternalLink(self, link):
if link[0] != "#":
return link
data = link[1:].split("|")
return data[0]
def parseLink(self, link):
intlink = self.parseInternalLink(link)
if intlink in self._localtargets:
page = '%s_%s.html' % (self._pagename, self._localtargets[intlink]) + '#'
else:
page = ''
return page+intlink
def setupLevel(self, level):
nlevel = int(level)
if not level in self._hlevels:
self._hlevels[level] = 0
self._hlevels[level] += 1
tmp = nlevel + 1
while tmp <= 6:
self._hlevels["%s" % tmp] = 0
tmp += 1
def levelLabel(self, level):
lab = ""
tmp = 1
while tmp < 6:
levnum = self._hlevels["%s" % tmp]
if levnum == 0:
break
lab += "%s." % (levnum)
tmp += 1
return lab
def solveStyle(self, item, style=None):
combined = {}
if style is None:
style = self.getAttrib(item, "style-name")
styledata = self.getStyle(style)
extra = ""
if styledata is not None:
cstyledata = copy.deepcopy(styledata)
# Solve style stack
stack = [cstyledata]
pstack = []
while cstyledata is not None and "parent" in cstyledata:
parstyle = cstyledata["parent"]
#if parstyle in pstack:
# break
pstack.append(parstyle)
pardata = self.getStyle(parstyle)
if pardata is not None:
stack.append(copy.deepcopy(pardata))
cstyledata = pardata
solved_style = {}
while stack:
data = stack.pop()
tmp = {}
tmp[style] = data
self.mergeStyles(tmp, solved_style)
parsedstyle = self.parseStyle(solved_style[style], item.tag)
if parsedstyle:
extra = ' style="%s"' % (parsedstyle)
return extra
def handleTail(self, item):
if item.tail is not None:
return item.tail
return ""
def mergeStyles(self, updater, dest=None):
if not updater:
return
if dest is None:
dest = self._styles
for k in updater:
if not k in dest:
dest[k] = updater[k]
else:
if "text-prop" in updater[k]:
if not "text-prop" in dest[k]:
dest[k]["text-prop"] = {}
dest[k]["text-prop"].update(updater[k]["text-prop"])
if "para-prop" in updater[k]:
if not "para-prop" in dest[k]:
dest[k]["para-prop"] = {}
dest[k]["para-prop"].update(updater[k]["para-prop"])
if "parent" in updater[k]:
dest[k]["parent"] = updater[k]["parent"]
if "tab" in updater[k]:
dest[k]["tab"] = updater[k]["tab"]
def tidyParentStyle(self, parentstyle):
if not parentstyle:
return None
t = parentstyle.pop()
while parentstyle and (t is None or not t or t == ''):
t = parentstyle.pop()
return t
def parseTag(self, item, page=1, parentstyle=[]):
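        """Recursively translate one ODF element (and its children) into an HTML fragment for the requested page, handling styles, lists, links, frames, images, tabs and headings."""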
listname = None
res = ""
res_start = ''
res_close = ''
style = self.getAttrib(item, "style-name")
styledata = self.getStyle(style)
if self.isBreak(styledata):
self._page += 1
if self._page != page:
tmp = ''
for ch in item:
tmp += self.parseTag(ch, page=page, parentstyle=parentstyle[:] + [style])
return res + tmp
if item.tag == "list-style":
listname = self.getAttrib(item, "name")
if not listname in self._styles:
self._styles[listname] = {}
self._listname = listname
elif item.tag == "list-level-style-bullet":
bullet = self.getAttrib(item, "bullet-char")
if self._listname is not None:
self._styles[self._listname]["bullet"] = bullet
elif item.tag == "paragraph-properties" or item.tag == "text-properties":
extra = self.parseStyleTag([item])
self.mergeStyles(extra)
elif item.tag == "style":
self._stylename = self.getAttrib(item, "name")
self._stylestack.append(self._stylename)
self._parentname = self.getAttrib(item, "parent-style-name")
elif item.tag == "list":
stylename = self.getAttrib(item, "style-name")
style = self.getStyle(stylename)
s = None
if stylename is None:
style = self.tidyParentStyle(parentstyle)
if style is not None and "bullet" in style:
res += "<ul>"
res_close += "</ul>"
else:
res += "<ol>"
res_close += "</ol>"
elif item.tag == "list-item":
res += "<li>"
res_close += "</li>"
elif item.tag == "a":
href = self.getAttrib(item, "href")
if href is not None:
extra = self.solveStyle(item)
res += '<a href="%s"%s>' % (self.parseLink(href), extra)
res_close += "</a>"
elif item.tag == "frame":
frame = {}
frame["style"] = self.solveStyle(item)
frame["anchor"] = self.getAttrib(item, "anchor")
frame["width"] = self.getAttrib(item, "width")
frame["height"] = self.getAttrib(item, "height")
self._framedata = frame
elif item.tag == "imageframe":
style = self.solveStyle(item)
            self._imageframe1 = '<span%s>' % (style)
            self._imageframe1_end = '</span>'
            self._imageframe2 = '<div%s>' % (style)
            self._imageframe2_end = '</div>'
elif item.tag == "image":
href = self.getAttrib(item, "href")
if href is not None:
self.images.append(href)
if self._framedata is not None:
img_styles = ""
p_styles = ""
if self._framedata["width"] is not None:
img_styles += "width: %s;" % (self._framedata["width"])
_anchor = self._framedata["anchor"]
if self._framedata["height"] is not None:
img_styles += "height: %s;" % (self._framedata["height"])
if _anchor == "paragraph":
p_styles += "margin-bottom: -%s;" % (self._framedata["height"])
imgextra = ""
if img_styles:
imgextra = ' style="%s;"' % (img_styles)
extra = ""
if p_styles:
extra = ' style="%s"' % (p_styles)
src = "img/%s" % (href)
imgdata = '<img src="%s"%s>' % (src, imgextra)
imgdata_end = '</img>'
if _anchor == "as-is":
res += self._imageframe1
res += imgdata
res_close += imgdata_end
res_close += self._imageframe1_end
else:
res += self._imageframe2
res += imgdata
res_close += imgdata_end
res_close += self._imageframe2_end
#if _anchor == "as-is":
# res += '<span%s>%s</span>' % (extra, imgdata)
#else:
# res += '<div%s>%s</div>' % (extra, imgdata)
elif item.tag == "tab-stop":
tab = {}
tab["pos"] = self.getAttrib(item, "position")
tab["type"] = self.getAttrib(item, "type")
tab["leader-style"] = self.getAttrib(item, "leader-style")
tab["leader-text"] = self.getAttrib(item, "leader-text")
self._tab = tab
if self._stylename is not None:
self._styles[self._stylename]["tab"] = tab
elif item.tag == "tab":
s = self.tidyParentStyle(parentstyle)
style = self.solveStyle(item, self._stylename)
if s is not None:
style = self.solveStyle(item, s)
res += "<span%s>%s</span>" % (style, ' ' * 9)
elif item.tag == "span":
style = self.solveStyle(item)
res += "<span%s>" % (style)
res_close += "</span>"
elif item.tag == "h":
level = self.getAttrib(item, "outline-level")
if level is None:
level = "1"
style = self.solveStyle(item)
self.setupLevel(level)
lab = self.levelLabel(level)
self._localtargets[lab] = self.page()
if item.text is not None:
lab += item.text
self._localtargets[lab] = self.page()
res += '<h%s%s><a name="%s"></a>' % (level, style, lab)
if level not in self.titles:
self.titles[level] = []
self.titles[level].append(item.text)
res_close += "</h%s>\n" % (level)
elif item.tag == "p":
extra = self.solveStyle(item)
snam = self.getAttrib(item, "style-name")
if snam is not None:
pah = ' class="%s"' % (snam)
else:
pah = ''
#if item.text is None or item.text == "":
# res += "<div class='emptyline'> </div>\n"
#else:
res_start += "<div%s%s>" % (extra, pah)
res_close += "</div>\n"
subdata = ''
for ch in item:
tmp_b = self.parseTag(ch, page=page, parentstyle=parentstyle[:] + [style])
if tmp_b:
subdata += tmp_b
tmp_f = ''
if item.tag == 'p' and not subdata and item.text is None:
res += "<div class='emptyline'> </div>\n"
else:
res += res_start
if item.text is not None:
res += item.text
res += subdata
#if self._page == page:
# print self._page, page, tmp
#res += '%s' % ''.join(tmp_b)
res += res_close
if res is not None:
res += self.handleTail(item)
if item.tag == "frame":
self._framedata = None
elif item.tag == "style":
self._stylestack.pop()
if self._stylestack:
self._stylename = self._stylestack[-1]
else:
self._stylename = None
elif item.tag == "imageframe":
self._imageframe1 = ''
self._imageframe1_end = ''
self._imageframe2 = ''
self._imageframe2_end = ''
#elif item.tag == "tab-stop":
# self._tab = None
#print 'res', page, res
#if page not in self._pagedata:
# self._pagedata[page] = ''
#self._pagedata[page] += res
#res += '\n<!--end-->\n</div>\n'
return res
def getStyle(self, name):
if name in self._styles:
return self._styles[name]
return None
def page(self):
return self._page
def pageCount(self):
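        """Count the document's pages by looking for paragraph styles that request a page break (break-before: page)."""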
if self._pagecnt is not None:
return self._pagecnt
pagecnt = 1
for item in self._content_root.getiterator():
if item.tag == "style":
extra = self.parseStyleTag([item])
self.mergeStyles(extra)
if "style-name" in item.attrib:
st = self.getStyle(item.attrib["style-name"])
if st is not None and "para-prop" in st:
if self.isBreak(st):
pagecnt += 1
self._pagecnt = pagecnt
return pagecnt
def read(self):
if self._read:
return True
if not self.open():
return False
self._styles_xml = self.extract("styles.xml")
self._content_xml = self.extract("content.xml")
self.parseXML()
self._styles = self.parseStyles()
self.close()
return True
| mit | 2,451,137,702,651,230,000 | 32.412256 | 122 | 0.478199 | false |
sam-m888/gramps | gramps/gen/filters/rules/media/_matchessourceconfidence.py | 5 | 1869 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Jerome Rapinat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Filters/Rules/Media/_MatchesSourceConfidence.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._matchessourceconfidencebase import MatchesSourceConfidenceBase
#-------------------------------------------------------------------------
# "Confidence level"
#-------------------------------------------------------------------------
class MatchesSourceConfidence(MatchesSourceConfidenceBase):
"""Media matching a specific confidence level on its 'direct' source references"""
labels = [_('Confidence level:')]
name = _('Media with a direct source >= <confidence level>')
description = _("Matches media with at least one direct source with confidence level(s)")
| gpl-2.0 | -3,949,259,287,551,906,000 | 40.533333 | 93 | 0.574639 | false |
Komzpa/GroundHog | tutorials/DT_RNN_Tut_Ex.py | 4 | 16484 | """
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
DropOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
rect = lambda x:TT.maximum(0., x)
theano.config.allow_gc = False
def get_text_data(state):
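    """Build the train/valid/test LMIterator streams described by `state` (the wiki corpus has no separate test stream)."""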
def out_format (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
def out_format_valid (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
train_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
seq_len = state['seqlen'],
mode="train",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format,
can_fit=True)
valid_data = LMIterator(
batch_size=state['bs'],
path=state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences = True,
seq_len= state['seqlen'],
mode="valid",
reset =state['reset'],
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
test_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences=True,
seq_len= state['seqlen'],
mode="test",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
if 'wiki' in state['path']:
test_data = None
return train_data, valid_data, test_data
def jobman(state, channel):
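    """Construct the DT-RNN language model described by `state` (input embedding, deep-transition recurrent layer, dropout output layer, softmax) and train it with SGD, printing samples via the hook."""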
# load dataset
rng = numpy.random.RandomState(state['seed'])
# declare the dimensionalies of the input and output
if state['chunks'] == 'words':
state['n_in'] = 10000
state['n_out'] = 10000
else:
state['n_in'] = 50
state['n_out'] = 50
train_data, valid_data, test_data = get_text_data(state)
## BEGIN Tutorial
### Define Theano Input Variables
x = TT.lvector('x')
y = TT.lvector('y')
h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
### Neural Implementation of the Operators: \oplus
#### Word Embedding
emb_words = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inp_nhids']),
activation=eval(state['inp_activ']),
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
rank_n_approx = state['rank_n_approx'],
scale=state['inp_scale'],
sparsity=state['inp_sparse'],
learn_bias = True,
bias_scale=eval(state['inp_bias']),
name='emb_words')
#### Deep Transition Recurrent Layer
rec = eval(state['rec_layer'])(
rng,
eval(state['nhids']),
activation = eval(state['rec_activ']),
#activation = 'TT.nnet.sigmoid',
bias_scale = eval(state['rec_bias']),
scale=eval(state['rec_scale']),
sparsity=eval(state['rec_sparse']),
init_fn=eval(state['rec_init']),
weight_noise=state['weight_noise'],
name='rec')
#### Stiching them together
##### (1) Get the embedding of a word
x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
##### (2) Embedding + Hidden State via DT Recurrent Layer
reset = TT.scalar('reset')
rec_layer = rec(x_emb, n_steps=x.shape[0],
init_state=h0*reset,
no_noise_bias=state['no_noise_bias'],
truncate_gradient=state['truncate_gradient'],
batch_size=1)
## BEGIN Exercise: DOT-RNN
### Neural Implementation of the Operators: \lhd
#### Exercise (1)
#### Hidden state -> Intermediate Layer
emb_state = MultiLayer(
rng,
n_in=eval(state['nhids'])[-1],
n_hids=eval(state['dout_nhid']),
activation=linear,
init_fn=eval(state['dout_init']),
weight_noise=state['weight_noise'],
scale=state['dout_scale'],
sparsity=state['dout_sparse'],
learn_bias = True,
bias_scale=eval(state['dout_bias']),
name='emb_state')
#### Exercise (1)
#### Input -> Intermediate Layer
emb_words_out = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['dout_nhid']),
activation=linear,
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
scale=state['dout_scale'],
sparsity=state['dout_sparse'],
rank_n_approx=state['dout_rank_n_approx'],
learn_bias = False,
bias_scale=eval(state['dout_bias']),
name='emb_words_out')
#### Hidden State: Combine emb_state and emb_words_out
#### Exercise (1)
outhid_activ = UnaryOp(activation=eval(state['dout_activ']))
#### Exercise (2)
outhid_dropout = DropOp(dropout=state['dropout'], rng=rng)
#### Softmax Layer
output_layer = SoftmaxLayer(
rng,
eval(state['dout_nhid']),
state['n_out'],
scale=state['out_scale'],
bias_scale=state['out_bias_scale'],
init_fn="sample_weights_classic",
weight_noise=state['weight_noise'],
sparsity=state['out_sparse'],
sum_over_time=True,
name='out')
### Few Optional Things
#### Direct shortcut from x to y
if state['shortcut_inpout']:
shortcut = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inpout_nhids']),
activations=eval(state['inpout_activ']),
init_fn='sample_weights_classic',
weight_noise = state['weight_noise'],
scale=eval(state['inpout_scale']),
sparsity=eval(state['inpout_sparse']),
learn_bias=eval(state['inpout_learn_bias']),
bias_scale=eval(state['inpout_bias']),
name='shortcut')
#### Learning rate scheduling (1/(1+n/beta))
state['clr'] = state['lr']
def update_lr(obj, cost):
stp = obj.step
if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
time = float(stp - obj.state['lr_start'])
new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
obj.lr = new_lr
if state['lr_adapt']:
rec.add_schedule(update_lr)
### Neural Implementations of the Language Model
#### Training
if state['shortcut_inpout']:
additional_inputs = [rec_layer, shortcut(x)]
else:
additional_inputs = [rec_layer]
##### Exercise (1): Compute the output intermediate layer
outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
##### Exercise (2): Apply Dropout
outhid = outhid_dropout(outhid)
train_model = output_layer(outhid,
no_noise_bias=state['no_noise_bias'],
additional_inputs=additional_inputs).train(target=y,
scale=numpy.float32(1./state['seqlen']))
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if state['carry_h0']:
train_model.updates += [(h0, nw_h0)]
#### Validation
h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
rec_layer = rec(emb_words(x, use_noise=False),
n_steps = x.shape[0],
batch_size=1,
init_state=h0val*reset,
use_noise=False)
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
##### Exercise (1): Compute the output intermediate layer
outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
##### Exercise (2): Apply Dropout
outhid = outhid_dropout(outhid, use_noise=False)
if state['shortcut_inpout']:
additional_inputs=[rec_layer, shortcut(x, use_noise=False)]
else:
additional_inputs=[rec_layer]
valid_model = output_layer(outhid,
additional_inputs=additional_inputs,
use_noise=False).validate(target=y, sum_over_time=True)
valid_updates = []
if state['carry_h0']:
valid_updates = [(h0val, nw_h0)]
valid_fn = theano.function([x,y, reset], valid_model.cost,
name='valid_fn', updates=valid_updates)
#### Sampling
##### single-step sampling
def sample_fn(word_tm1, h_tm1):
x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
outhid = outhid_dropout(outhid_activ(emb_state(h0, use_noise=False, one_step=True) +
emb_words_out(word_tm1, use_noise=False, one_step=True), one_step=True),
use_noise=False, one_step=True)
word = output_layer.get_sample(state_below=outhid, additional_inputs=[h0], temp=1.)
return word, h0
##### scan for iterating the single-step sampling multiple times
[samples, summaries], updates = scan(sample_fn,
states = [
TT.alloc(numpy.int64(0), state['sample_steps']),
TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
n_steps= state['sample_steps'],
name='sampler_scan')
##### build a Theano function for sampling
sample_fn = theano.function([], [samples],
updates=updates, profile=False, name='sample_fn')
##### Load a dictionary
dictionary = numpy.load(state['dictionary'])
if state['chunks'] == 'chars':
dictionary = dictionary['unique_chars']
else:
dictionary = dictionary['unique_words']
def hook_fn():
sample = sample_fn()[0]
print 'Sample:',
if state['chunks'] == 'chars':
print "".join(dictionary[sample])
else:
for si in sample:
print dictionary[si],
print
### Build and Train a Model
#### Define a model
model = LM_Model(
cost_layer = train_model,
weight_noise_amount=state['weight_noise_amount'],
valid_fn = valid_fn,
clean_before_noise_fn = False,
noise_fn = None,
rng = rng)
if state['reload']:
model.load(state['prefix']+'model.npz')
#### Define a trainer
##### Training algorithm (SGD)
if state['moment'] < 0:
algo = SGD(model, state, train_data)
else:
algo = SGD_m(model, state, train_data)
##### Main loop of the trainer
main = MainLoop(train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
train_cost = False,
hooks = hook_fn,
validate_postprocess = eval(state['validate_postprocess']))
## Run!
main.main()
if __name__=='__main__':
state = {}
# complete path to data (cluster specific)
state['seqlen'] = 100
state['path']= "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz"
state['dictionary']= "/data/lisa/data/PennTreebankCorpus/dictionaries.npz"
state['chunks'] = 'chars'
state['seed'] = 123
    # Flag; you don't need to change it. It says what to do if the cost becomes
    # NaN: you could raise, though I would leave it set to 'warn'
state['on_nan'] = 'warn'
# DATA
    # For Wikipedia the validation set is extremely large, which is very time
    # wasteful. This value is only used for the validation set, and IMHO should
    # be something like seqlen * 10000 (i.e. the validation should only be
    # 10000 steps)
state['reset'] = -1
# For music/ word level I think 50 is a good idea. For character this
# should be at least 100 (I think there are problems with getting state
# of the art otherwise). Note most people use 200 !
    # The job stops when the learning rate declines to this value. It can be
    # useful, because sometimes it is hopeless to wait for the validation error
    # to get below minerr, or for the time to expire
state['minlr'] = float(5e-7)
# Layers
# Input
# Input weights are sampled from a gaussian with std=scale; this is the
# standard way to initialize
state['rank_n_approx'] = 0
state['inp_nhids'] = '[200]'
state['inp_activ'] = '[linear]'
state['inp_bias'] = '[0.]'
state['inp_sparse']= -1 # dense
state['inp_scale'] = .1
# This is for the output weights
state['out_scale'] = .1
state['out_bias_scale'] = -.5
state['out_sparse'] = -1
state['dout_nhid'] = '200'
state['dout_activ'] = '"TT.nnet.sigmoid"'
state['dout_sparse']= 20
state['dout_scale'] = 1.
state['dout_bias'] = '[0]'
state['dout_init'] = "'sample_weights'"
state['dout_rank_n_approx'] = 0
state['dropout'] = .5
# HidLayer
    # Number of hidden units for the internal layers of the DT-RNN. Having a
    # single value results in a standard RNN
state['nhids'] = '[100, 100]'
# Activation of each layer
state['rec_activ'] = '"TT.nnet.sigmoid"'
state['rec_bias'] = '.0'
state['rec_sparse'] ='20'
state['rec_scale'] = '1.'
# sample_weights - you rescale the weights such that the largest
# singular value is scale
# sample_weights_classic : just sample weights from a gaussian with std
# equal to scale
state['rec_init'] = "'sample_weights'"
state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
# SGD params
state['bs'] = 1 # the size of the minibatch
state['lr'] = 1. # initial learning rate
state['cutoff'] = 1. # threshold for gradient rescaling
state['moment'] = 0.995 #-.1 # momentum
# Do not optimize these
state['weight_noise'] = True # white Gaussian noise in weights
state['weight_noise_amount'] = 0.075 # standard deviation
# maximal number of updates
state['loopIters'] = int(1e8)
# maximal number of minutes to wait until killing job
state['timeStop'] = 48*60 # 48 hours
# Construct linear connections from input to output. These are factored
# (like the rank_n) to deal with the possible high dimensionality of the
# input, but it is a linear projection that feeds into the softmax
state['shortcut_inpout'] = False
state['shortcut_rank'] = 200
# Main Loop
    # Make this a decently large value. Otherwise you waste a lot of
# memory keeping track of the training error (and other things) at each
# step + the stdout becomes extremely large
state['trainFreq'] = 100
state['hookFreq'] = 5000
state['validFreq'] = 1000
state['saveFreq'] = 15 # save every 15 minutes
state['prefix'] = 'model_' # prefix of the save files
state['reload'] = False # reload
state['overwrite'] = 1
    # Threshold should be 1.004 for PPL; for entropy (which is what
    # everything returns) it should be much smaller. The running value is 1.0002.
    # We should not hyperoptimize this
state['divide_lr'] = 2.
state['cost_threshold'] = 1.0002
state['patience'] = 1
state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
state['truncate_gradient'] = 80 # truncated BPTT
state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
state['lr_beta'] = 10*1900.
state['lr_start'] = 'on_error'
state['no_noise_bias'] = True # do not use weight noise for biases
state['carry_h0'] = True # carry over h0 across updates
state['sample_steps'] = 80
# Do not change these
state['minerr'] = -1
state['shift'] = 1 # n-step forward prediction
state['cutoff_rescale_length'] = False
jobman(state, None)
| bsd-3-clause | 8,476,574,881,672,587,000 | 33.27027 | 92 | 0.577772 | false |
dokterbob/django-shopkit | shopkit/category/advanced/models.py | 1 | 1254 | # Copyright (C) 2010-2011 Mathijs de Bruin <[email protected]>
#
# This file is part of django-shopkit.
#
# django-shopkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from django.db import models
from shopkit.category.settings import *
from shopkit.category.basemodels import *
class CategorizedItemBase(models.Model):
""" Abstract base class for an advanced categorized item,
possibly belonging to multiple categories. """
class Meta:
abstract = True
categories = models.ManyToManyField(CATEGORY_MODEL)
""" Categories for the current product. """
| agpl-3.0 | -6,294,358,828,998,404,000 | 37 | 77 | 0.748006 | false |
IQSS/miniverse | dv_apps/metrics/views_maintenance.py | 1 | 2855 | """
Metric views, returning JSON responses
"""
from django.shortcuts import render
from django.views.decorators.cache import cache_page
#from django.http import JsonResponse, HttpResponse, Http404
from dv_apps.metrics.stats_util_files import StatsMakerFiles, FILE_TYPE_OCTET_STREAM
from dv_apps.utils.metrics_cache_time import get_metrics_cache_time
from dv_apps.metrics.forms import FixContentTypeForm
FIVE_HOURS = 60 * 60 * 5
"""
from django.core.cache import cache
cache.clear()
"""
@cache_page(get_metrics_cache_time())
def view_all_file_extension_counts(request):
"""Reference table of all file extensions with counts"""
stats_files = StatsMakerFiles()
all_counts = stats_files.view_file_extensions_within_type()
if all_counts and all_counts.result_data:
d = dict(all_counts=all_counts.result_data['records'],
total_file_count=all_counts.result_data['total_file_count'],
number_unique_extensions=all_counts.result_data['number_unique_extensions'],
)
else:
d = dict(all_counts=[],
total_file_count=0,
number_unique_extensions=0,
)
return render(request, 'metrics/view_all_file_extension_counts.html', d)
@cache_page(get_metrics_cache_time())
def view_files_extensions_with_unknown_content_types(request):
"""Reference table of file extensions with unknown content type"""
stats_files = StatsMakerFiles()
unknown_counts = stats_files.view_file_extensions_within_type(FILE_TYPE_OCTET_STREAM)
if unknown_counts and unknown_counts.result_data:
d = dict(unknown_counts=unknown_counts.result_data['records'],
total_file_count=unknown_counts.result_data['total_file_count'],
number_unique_extensions=unknown_counts.result_data['number_unique_extensions'],
all_dv_files_count=unknown_counts.result_data['all_dv_files'],
percent_unknown=unknown_counts.result_data['percent_unknown'])
else:
d = dict(unknown_counts=[],
total_file_count=0,
number_unique_extensions=0,
all_dv_files_count=0,
percent_unknown=0)
return render(request, 'metrics/view_file_extensions_with_unknown_content_types.html', d)
def view_fix_extension(request):
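    """Render the FixContentTypeForm; on a valid POST, include the generated fix instructions in the template context."""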
d = {}
if request.POST:
f = FixContentTypeForm(request.POST)
if f.is_valid():
d['fix_instructions'] = f.get_fix_instructions()
#f = FixContentTypeForm()
else:
if request.GET.has_key('ext'):
initial_data = dict(file_extension=request.GET['ext'])
else:
initial_data = {}
f = FixContentTypeForm(initial=initial_data)
d['fix_form'] = f
return render(request, 'metrics/maintenance/view_fix_extension.html', d)
| mit | 8,064,920,246,181,727,000 | 36.565789 | 96 | 0.657443 | false |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/net/data/verify_certificate_chain_unittest/generate-target-has-keycertsign-but-not-ca.py | 5 | 1255 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediate, a trusted root, and a target
certificate that is not a CA, and yet has the keyCertSign bit set. Verification
is expected to fail, since keyCertSign should only be asserted when CA is
true."""
import common
# Self-signed root certificate (used as trust anchor).
root = common.create_self_signed_root_certificate('Root')
# Intermediate certificate.
intermediate = common.create_intermediate_certificate('Intermediate', root)
# Target certificate (end entity but has keyCertSign bit set).
target = common.create_end_entity_certificate('Target', intermediate)
target.get_extensions().set_property('keyUsage',
'critical,digitalSignature,keyEncipherment,keyCertSign')
chain = [target, intermediate]
trusted = common.TrustAnchor(root, constrained=False)
time = common.DEFAULT_TIME
verify_result = False
errors = """[Context] Processing Certificate
index: 1
[Error] Target certificate looks like a CA but does not set all CA properties
"""
common.write_test_file(__doc__, chain, trusted, time, verify_result, errors)
| gpl-3.0 | 5,629,663,130,636,404,000 | 35.911765 | 83 | 0.766534 | false |
MostlyOpen/odoo_addons | myo_summary/models/summary_log.py | 1 | 2815 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
from datetime import *
class SummaryLog(models.Model):
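    """Chronological log entry storing who changed a summary, when, and the values/action/notes involved."""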
_name = 'myo.summary.log'
summary_id = fields.Many2one('myo.summary', 'Summary', required=True, ondelete='cascade')
user_id = fields.Many2one(
'res.users',
'User',
required=True,
default=lambda self: self.env.user
)
date_log = fields.Datetime(
'When',
required=True,
default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S')
)
values = fields.Text(string='Values')
action = fields.Char(string='Action')
notes = fields.Text(string='Notes')
_order = "date_log desc"
class Summary(models.Model):
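    """Extension of myo.summary that records a SummaryLog entry on every create and write while active_log is enabled."""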
_inherit = 'myo.summary'
log_ids = fields.One2many('myo.summary.log', 'summary_id', 'Summary Log',
readonly=True)
active_log = fields.Boolean(
'Active Log',
help="If unchecked, it will allow you to disable the log without removing it.",
default=True
)
@api.one
def insert_myo_summary_log(self, summary_id, values, action, notes):
if self.active_log or 'active_log' in values:
vals = {
'summary_id': summary_id,
'values': values,
'action': action,
'notes': notes,
}
self.pool.get('myo.summary.log').create(self._cr, self._uid, vals)
@api.multi
def write(self, values):
action = 'write'
notes = False
for summary in self:
summary.insert_myo_summary_log(summary.id, values, action, notes)
return super(Summary, self).write(values)
@api.model
def create(self, values):
action = 'create'
notes = False
record = super(Summary, self).create(values)
record.insert_myo_summary_log(record.id, values, action, notes)
return record
| agpl-3.0 | 3,287,787,353,323,953,000 | 32.511905 | 93 | 0.590409 | false |
MauHernandez/cyclope | cyclope/migrations/0016_frontend_views_simplified_option.py | 2 | 14467 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
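        """Fold the legacy 'simplified_teaser_list' content view into 'teaser_list' with the 'simplified' view option, for both region views and menu items."""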
rvs = orm.RegionView.objects.filter(content_view=u"simplified_teaser_list")
mis = orm.MenuItem.objects.filter(content_view=u"simplified_teaser_list")
data_set = list(rvs) + list(mis)
for view in data_set:
view.content_view = u"teaser_list"
view.view_options["simplified"] = True
view.save()
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'collections.collection': {
'Meta': {'object_name': 'Collection'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'default_list_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'navigation_root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.author': {
'Meta': {'object_name': 'Author'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'})
},
'cyclope.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '100'})
},
'cyclope.layout': {
'Meta': {'object_name': 'Layout'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.menu': {
'Meta': {'object_name': 'Menu'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'})
},
'cyclope.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_entries'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'custom_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'menu_items'", 'to': "orm['cyclope.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': "orm['cyclope.MenuItem']"}),
'persistent_layout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site_home': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"})
},
'cyclope.regionview': {
'Meta': {'object_name': 'RegionView'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'region_views'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedContent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'other_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'other_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_rt'", 'to': "orm['contenttypes.ContentType']"}),
'self_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'self_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_lt'", 'to': "orm['contenttypes.ContentType']"})
},
'cyclope.sitesettings': {
'Meta': {'object_name': 'SiteSettings'},
'allow_comments': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'default_layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'global_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'newsletter_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['collections.Collection']", 'null': 'True', 'blank': 'True'}),
'rss_content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'symmetrical': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'cyclope.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'})
},
'cyclope.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'about': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cyclope']
| gpl-3.0 | -7,144,526,115,020,062,000 | 79.372222 | 189 | 0.545932 | false |
mslovy/barrelfish | tools/arm_gem5/O3_ARM_v7a.py | 9 | 6472 | # Copyright (c) 2012 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
from m5.objects import *
# Simple ALU Instructions have a latency of 1
class O3_ARM_v7a_Simple_Int(FUDesc):
opList = [ OpDesc(opClass='IntAlu', opLat=1) ]
count = 2
# Complex ALU instructions have variable latencies
class O3_ARM_v7a_Complex_Int(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3, issueLat=1),
OpDesc(opClass='IntDiv', opLat=12, issueLat=12),
OpDesc(opClass='IprAccess', opLat=3, issueLat=1) ]
count = 1
# Floating point and SIMD instructions
class O3_ARM_v7a_FP(FUDesc):
opList = [ OpDesc(opClass='SimdAdd', opLat=4),
OpDesc(opClass='SimdAddAcc', opLat=4),
OpDesc(opClass='SimdAlu', opLat=4),
OpDesc(opClass='SimdCmp', opLat=4),
OpDesc(opClass='SimdCvt', opLat=3),
OpDesc(opClass='SimdMisc', opLat=3),
OpDesc(opClass='SimdMult',opLat=5),
OpDesc(opClass='SimdMultAcc',opLat=5),
OpDesc(opClass='SimdShift',opLat=3),
OpDesc(opClass='SimdShiftAcc', opLat=3),
OpDesc(opClass='SimdSqrt', opLat=9),
OpDesc(opClass='SimdFloatAdd',opLat=5),
OpDesc(opClass='SimdFloatAlu',opLat=5),
OpDesc(opClass='SimdFloatCmp', opLat=3),
OpDesc(opClass='SimdFloatCvt', opLat=3),
OpDesc(opClass='SimdFloatDiv', opLat=3),
OpDesc(opClass='SimdFloatMisc', opLat=3),
OpDesc(opClass='SimdFloatMult', opLat=3),
OpDesc(opClass='SimdFloatMultAcc',opLat=1),
OpDesc(opClass='SimdFloatSqrt', opLat=9),
OpDesc(opClass='FloatAdd', opLat=5),
OpDesc(opClass='FloatCmp', opLat=5),
OpDesc(opClass='FloatCvt', opLat=5),
OpDesc(opClass='FloatDiv', opLat=9, issueLat=9),
OpDesc(opClass='FloatSqrt', opLat=33, issueLat=33),
OpDesc(opClass='FloatMult', opLat=4) ]
count = 2
# Load/Store Units
class O3_ARM_v7a_Load(FUDesc):
opList = [ OpDesc(opClass='MemRead',opLat=2) ]
count = 1
class O3_ARM_v7a_Store(FUDesc):
opList = [OpDesc(opClass='MemWrite',opLat=2) ]
count = 1
# Functional Units for this CPU
class O3_ARM_v7a_FUP(FUPool):
FUList = [O3_ARM_v7a_Simple_Int(), O3_ARM_v7a_Complex_Int(),
O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()]
class O3_ARM_v7a_3(DerivO3CPU):
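    # Out-of-order pipeline parameters for this ARMv7-A model: tournament
    # branch predictor sizing, per-stage delays and widths, load/store queue
    # depths, and physical register / IQ / ROB capacities.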
predType = "tournament"
localPredictorSize = 64
localCtrBits = 2
localHistoryTableSize = 64
localHistoryBits = 6
globalPredictorSize = 8192
globalCtrBits = 2
globalHistoryBits = 13
choicePredictorSize = 8192
choiceCtrBits = 2
BTBEntries = 2048
BTBTagSize = 18
RASSize = 16
instShiftAmt = 2
LQEntries = 16
SQEntries = 16
LSQDepCheckShift = 0
LFSTSize = 1024
SSITSize = 1024
decodeToFetchDelay = 1
renameToFetchDelay = 1
iewToFetchDelay = 1
commitToFetchDelay = 1
renameToDecodeDelay = 1
iewToDecodeDelay = 1
commitToDecodeDelay = 1
iewToRenameDelay = 1
commitToRenameDelay = 1
commitToIEWDelay = 1
fetchWidth = 3
fetchToDecodeDelay = 3
decodeWidth = 3
decodeToRenameDelay = 2
renameWidth = 3
renameToIEWDelay = 1
issueToExecuteDelay = 1
dispatchWidth = 6
issueWidth = 8
wbWidth = 8
wbDepth = 1
fuPool = O3_ARM_v7a_FUP()
iewToCommitDelay = 1
renameToROBDelay = 1
commitWidth = 8
squashWidth = 8
trapLatency = 13
backComSize = 5
forwardComSize = 5
numPhysIntRegs = 128
numPhysFloatRegs = 128
numIQEntries = 32
numROBEntries = 40
    defer_registration = False
# Instruction Cache
# All latencies assume a 1GHz clock rate; with a faster clock they would be lower
class O3_ARM_v7a_ICache(BaseCache):
response_latency = '1'
block_size = 64
mshrs = 2
tgts_per_mshr = 8
size = '32kB'
assoc = 2
is_top_level = 'true'
# Data Cache
# All latencies assume a 1GHz clock rate; with a faster clock they would be lower
class O3_ARM_v7a_DCache(BaseCache):
response_latency = '2'
block_size = 64
mshrs = 6
tgts_per_mshr = 8
size = '32kB'
assoc = 2
write_buffers = 16
is_top_level = 'true'
# TLB Cache
# Use a cache as an L2 TLB
class O3_ARM_v7aWalkCache(BaseCache):
response_latency = '4'
block_size = 64
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 8
write_buffers = 16
is_top_level = 'true'
# L2 Cache
# All latencies assume a 1GHz clock rate; with a faster clock they would be lower
class O3_ARM_v7aL2(BaseCache):
response_latency = '12'
block_size = 64
mshrs = 16
tgts_per_mshr = 8
size = '1MB'
assoc = 16
write_buffers = 8
prefetch_on_access = 'true'
# Simple stride prefetcher
prefetcher = StridePrefetcher(degree=8, latency=1)
| mit | -926,356,532,002,963,500 | 32.020408 | 81 | 0.662083 | false |
Gustavosdo/django-salmonella | salmonella/admin.py | 1 | 1434 | from salmonella.widgets import SalmonellaIdWidget, SalmonellaMultiIdWidget
class SalmonellaMixin(object):
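    # ModelAdmin mixin: any field named in salmonella_fields gets a Salmonella
    # raw-id widget (single or multi) instead of Django's default select widget
    # for ForeignKey and ManyToMany fields.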
salmonella_fields = ()
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if db_field.name in self.salmonella_fields:
try:
kwargs['widget'] = SalmonellaIdWidget(db_field.rel)
except TypeError: # django 1.4+
kwargs['widget'] = SalmonellaIdWidget(db_field.rel, self.admin_site)
return db_field.formfield(**kwargs)
return super(SalmonellaMixin, self).formfield_for_foreignkey(db_field,
request,
**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name in self.salmonella_fields:
try:
kwargs['widget'] = SalmonellaMultiIdWidget(db_field.rel)
except TypeError: # django 1.4+
kwargs['widget'] = SalmonellaMultiIdWidget(db_field.rel, self.admin_site)
kwargs['help_text'] = ''
return db_field.formfield(**kwargs)
return super(SalmonellaMixin, self).formfield_for_manytomany(db_field,
request,
**kwargs)
| mit | -3,950,196,689,673,282,600 | 50.214286 | 89 | 0.521618 | false |
bcl/pykickstart | tests/commands/url.py | 2 | 7541 | #
# Martin Gracik <[email protected]>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.commands.url import FC3_Url
class Url_TestCase(unittest.TestCase):
def runTest(self):
data1 = FC3_Url()
data2 = FC3_Url()
# test that new objects are always equal
self.assertEqual(data1, data2)
self.assertNotEqual(data1, None)
# test for objects difference
for atr in ['url']:
setattr(data1, atr, '')
setattr(data2, atr, 'test')
# objects that differ in only one attribute
# are not equal
self.assertNotEqual(data1, data2)
self.assertNotEqual(data2, data1)
setattr(data1, atr, '')
setattr(data2, atr, '')
class FC3_TestCase(CommandTest):
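    # Parser tests for the original FC3-era "url" command, where --url is the
    # only (and required) option.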
command = "url"
def runTest(self):
# pass
self.assert_parse("url --url=http://domain.com", "url --url=\"http://domain.com\"\n")
self.assertFalse(self.assert_parse("url --url=http://domain.com") is None)
self.assertTrue(self.assert_parse("url --url=http://domainA.com") !=
self.assert_parse("url --url=http://domainB.com"))
self.assertFalse(self.assert_parse("url --url=http://domainA.com") ==
self.assert_parse("url --url=http://domainB.com"))
# fail
# missing required option --url
self.assert_parse_error("url")
self.assert_parse_error("url --url")
# extra test coverage
cmd = self.handler().commands[self.command]
cmd.seen = False
self.assertEqual(cmd.__str__(), "")
class F13_TestCase(FC3_TestCase):
def runTest(self):
# run FC3 test case
FC3_TestCase.runTest(self)
# pass
self.assert_parse("url --url=http://someplace/somewhere --proxy=http://wherever/other",
"url --url=\"http://someplace/somewhere\" --proxy=\"http://wherever/other\"\n")
self.assertTrue(self.assert_parse("url --url=http://domain.com --proxy=http://proxy.com") ==
self.assert_parse("url --url=http://domain.com --proxy=http://proxy.com"))
self.assertFalse(self.assert_parse("url --url=http://domain.com --proxy=http://proxyA.com") ==
self.assert_parse("url --url=http://domain.com --proxy=http://proxyB.com"))
# fail
self.assert_parse_error("cdrom --proxy=http://someplace/somewhere")
self.assert_parse_error("url --url=http://someplace/somewhere --proxy")
self.assert_parse_error("url --proxy=http://someplace/somewhere")
class F14_TestCase(F13_TestCase):
def runTest(self):
# run FC6 test case
F13_TestCase.runTest(self)
# pass
self.assert_parse("url --url=https://someplace/somewhere --noverifyssl",
"url --url=\"https://someplace/somewhere\" --noverifyssl\n")
self.assertTrue(self.assert_parse("url --url=https://domain.com --noverifyssl") ==
self.assert_parse("url --url=https://domain.com --noverifyssl"))
self.assertFalse(self.assert_parse("url --url=https://domain.com") ==
self.assert_parse("url --url=https://domain.com --noverifyssl"))
# fail
self.assert_parse_error("cdrom --noverifyssl")
class F18_TestCase(F14_TestCase):
def runTest(self):
# run F14 test case.
F14_TestCase.runTest(self)
# pass
self.assert_parse("url --mirrorlist=http://www.wherever.com/mirror",
"url --mirrorlist=\"http://www.wherever.com/mirror\"\n")
self.assertTrue(self.assert_parse("url --mirrorlist=https://domain.com") ==
self.assert_parse("url --mirrorlist=https://domain.com"))
self.assertFalse(self.assert_parse("url --url=https://domain.com") ==
self.assert_parse("url --mirrorlist=https://domain.com"))
# fail
# missing one of required options --url or --mirrorlist
self.assert_parse_error("url")
self.assert_parse_error("url --mirrorlist")
# It's --url, not --baseurl.
self.assert_parse_error("url --baseurl=www.wherever.com")
# only one of --url or --mirrorlist may be specified
self.assert_parse_error("url --url=www.wherever.com --mirrorlist=www.wherever.com")
# extra test coverage
cmd = self.handler().commands[self.command]
cmd.seen = True
cmd.url = None
cmd.mirrorlist = None
self.assertEqual(cmd.__str__(), "# Use network installation\n\n")
class F27_TestCase(F18_TestCase):
def runTest(self):
# run F18 test case.
F18_TestCase.runTest(self)
# pass
self.assert_parse("url --metalink=http://www.wherever.com/metalink",
"url --metalink=\"http://www.wherever.com/metalink\"\n")
self.assertTrue(self.assert_parse("url --metalink=https://domain.com") == \
self.assert_parse("url --metalink=https://domain.com"))
self.assertFalse(self.assert_parse("url --url=https://domain.com") == \
self.assert_parse("url --metalink=https://domain.com"))
# fail
self.assert_parse_error("url --metalink")
# only one of --url, --mirrorlist, or --metalink may be specified
self.assert_parse_error("url --url=www.wherever.com --metalink=www.wherever.com")
self.assert_parse_error("url --mirrorlist=www.wherever.com --metalink=www.wherever.com")
class F30_TestCase(F27_TestCase):
def runTest(self):
# run F27 test case.
F27_TestCase.runTest(self)
# pass
self.assert_parse("url --url=http://example.com --sslclientcert=file:///foo/bar",
"url --url=\"http://example.com\" --sslclientcert=\"file:///foo/bar\"\n")
self.assert_parse("url --url=http://example.com --sslclientkey=file:///foo/bar",
"url --url=\"http://example.com\" --sslclientkey=\"file:///foo/bar\"\n")
self.assert_parse("url --url=http://example.com --sslcacert=file:///foo/bar",
"url --url=\"http://example.com\" --sslcacert=\"file:///foo/bar\"\n")
# fail: all of these take arguments
self.assert_parse_error("url --url=http://example.com --sslclientcert")
self.assert_parse_error("url --url=http://example.com --sslclientkey")
self.assert_parse_error("url --url=http://example.com --sslcacert")
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 8,349,713,520,328,970,000 | 41.60452 | 105 | 0.607744 | false |